2024-11-20 22:23:13,572 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-20 22:23:13,585 main DEBUG Took 0.010700 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-20 22:23:13,585 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-20 22:23:13,586 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-20 22:23:13,587 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-20 22:23:13,588 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:13,595 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-20 22:23:13,607 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:13,608 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:13,609 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:13,610 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:13,610 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:13,610 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:13,611 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:13,612 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:13,612 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:13,612 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:13,613 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:13,613 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:13,614 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:13,614 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-20 22:23:13,615 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:13,615 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:13,616 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:13,616 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:13,616 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:13,617 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:13,617 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:13,618 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:13,618 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:13,618 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:13,619 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:13,619 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-20 22:23:13,621 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:13,623 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-20 22:23:13,626 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-20 22:23:13,627 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
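The builder calls above correspond to per-package log levels loaded from the test's log4j2.properties (root at INFO writing to the Console appender, org.apache.hadoop at WARN, org.apache.hadoop.hbase at DEBUG, org.apache.zookeeper at ERROR, and so on). The short Java sketch below sets a few of the same levels programmatically through the Log4j 2 Configurator; it only illustrates what the properties configuration establishes and is not code from the test run.

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public class LogLevelSketch {
  public static void main(String[] args) {
    // Programmatic equivalent of a few of the logger levels built above; the real
    // run loads them from log4j2.properties inside the hbase-logging tests jar.
    Configurator.setRootLevel(Level.INFO);
    Configurator.setLevel("org.apache.hadoop", Level.WARN);
    Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);
    Configurator.setLevel("org.apache.zookeeper", Level.ERROR);
    Configurator.setLevel("org.apache.hbase.thirdparty.io.netty.channel", Level.DEBUG);
  }
}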
2024-11-20 22:23:13,628 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-20 22:23:13,629 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-20 22:23:13,637 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-20 22:23:13,640 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-20 22:23:13,642 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-20 22:23:13,642 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-20 22:23:13,642 main DEBUG createAppenders(={Console}) 2024-11-20 22:23:13,643 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-11-20 22:23:13,644 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-20 22:23:13,644 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-11-20 22:23:13,644 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-20 22:23:13,645 main DEBUG OutputStream closed 2024-11-20 22:23:13,645 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-20 22:23:13,645 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-20 22:23:13,646 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-11-20 22:23:13,719 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-20 22:23:13,721 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-20 22:23:13,722 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-20 22:23:13,723 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-20 22:23:13,723 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-20 22:23:13,724 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-20 22:23:13,724 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-20 22:23:13,725 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-20 22:23:13,725 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-20 22:23:13,725 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-20 22:23:13,726 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-20 22:23:13,726 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-20 22:23:13,726 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-20 22:23:13,727 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-20 22:23:13,727 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-20 22:23:13,727 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-20 22:23:13,728 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-20 22:23:13,729 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-20 22:23:13,731 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-20 22:23:13,732 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-11-20 22:23:13,732 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-20 22:23:13,734 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-11-20T22:23:13,986 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d 2024-11-20 22:23:13,990 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-20 22:23:13,990 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
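The entries that follow show HBaseClassTestRule applying a 13-minute class timeout to org.apache.hadoop.hbase.TestAcidGuaranteesWithBasicPolicy and HBaseTestingUtility starting a mini cluster with one master, one region server, one data node and one ZooKeeper server. The sketch below (not the actual test source; class name and structure are illustrative) shows how an HBase 2.x test typically declares that rule and starts a cluster with the same options.

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;

public class MiniClusterStartupSketch {
  // Drives the per-class timeout reported as "timeout: 13 mins" below.
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(MiniClusterStartupSketch.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUp() throws Exception {
    // Mirrors the StartMiniClusterOption values printed by HBaseTestingUtility(1126) below.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(1)
        .numZkServers(1)
        .build();
    TEST_UTIL.startMiniCluster(option);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }
}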
2024-11-20T22:23:14,002 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithBasicPolicy timeout: 13 mins 2024-11-20T22:23:14,030 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T22:23:14,035 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/cluster_4bf5016f-69f0-b7cd-6e5c-cf5b66ac688f, deleteOnExit=true 2024-11-20T22:23:14,036 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-11-20T22:23:14,037 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/test.cache.data in system properties and HBase conf 2024-11-20T22:23:14,038 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T22:23:14,039 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/hadoop.log.dir in system properties and HBase conf 2024-11-20T22:23:14,040 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T22:23:14,041 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T22:23:14,042 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-20T22:23:14,160 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-20T22:23:14,250 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-20T22:23:14,255 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T22:23:14,256 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T22:23:14,256 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T22:23:14,257 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T22:23:14,258 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T22:23:14,258 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T22:23:14,258 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T22:23:14,259 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T22:23:14,259 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T22:23:14,260 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/nfs.dump.dir in system properties and HBase conf 2024-11-20T22:23:14,260 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/java.io.tmpdir in system properties and HBase conf 2024-11-20T22:23:14,260 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T22:23:14,261 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T22:23:14,261 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T22:23:15,257 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-20T22:23:15,326 INFO [Time-limited test {}] log.Log(170): Logging initialized @2551ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-20T22:23:15,392 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T22:23:15,468 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T22:23:15,496 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T22:23:15,496 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T22:23:15,498 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T22:23:15,509 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T22:23:15,512 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73882ca4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/hadoop.log.dir/,AVAILABLE} 2024-11-20T22:23:15,513 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@588be694{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T22:23:15,725 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f0d4558{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/java.io.tmpdir/jetty-localhost-46141-hadoop-hdfs-3_4_1-tests_jar-_-any-16176473559278815901/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T22:23:15,735 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a299586{HTTP/1.1, (http/1.1)}{localhost:46141} 2024-11-20T22:23:15,735 INFO [Time-limited test {}] server.Server(415): Started @2961ms 2024-11-20T22:23:16,233 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T22:23:16,241 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T22:23:16,242 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T22:23:16,242 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T22:23:16,242 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T22:23:16,243 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57582772{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/hadoop.log.dir/,AVAILABLE} 2024-11-20T22:23:16,244 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@63d4d645{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T22:23:16,352 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bd2e890{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/java.io.tmpdir/jetty-localhost-39835-hadoop-hdfs-3_4_1-tests_jar-_-any-8875897501442334541/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T22:23:16,353 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2d3fa6ef{HTTP/1.1, (http/1.1)}{localhost:39835} 2024-11-20T22:23:16,353 INFO [Time-limited test {}] server.Server(415): Started @3578ms 2024-11-20T22:23:16,418 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T22:23:17,917 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/cluster_4bf5016f-69f0-b7cd-6e5c-cf5b66ac688f/dfs/data/data2/current/BP-1425926944-172.17.0.2-1732141394839/current, will proceed with Du for space computation calculation, 2024-11-20T22:23:17,917 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/cluster_4bf5016f-69f0-b7cd-6e5c-cf5b66ac688f/dfs/data/data1/current/BP-1425926944-172.17.0.2-1732141394839/current, will proceed with Du for space computation calculation, 2024-11-20T22:23:17,966 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T22:23:18,031 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcaded9ab757b9ff3 with lease ID 0x1f4ac86eb26290ea: Processing first storage report for DS-1d6c4156-c0a8-4f58-babd-98f06f69b52f from datanode DatanodeRegistration(127.0.0.1:34119, datanodeUuid=add97091-1a74-4fba-98fe-43eb274f7e88, infoPort=40295, infoSecurePort=0, ipcPort=38349, storageInfo=lv=-57;cid=testClusterID;nsid=805701225;c=1732141394839) 2024-11-20T22:23:18,032 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcaded9ab757b9ff3 with lease ID 0x1f4ac86eb26290ea: from storage DS-1d6c4156-c0a8-4f58-babd-98f06f69b52f node DatanodeRegistration(127.0.0.1:34119, datanodeUuid=add97091-1a74-4fba-98fe-43eb274f7e88, infoPort=40295, infoSecurePort=0, ipcPort=38349, storageInfo=lv=-57;cid=testClusterID;nsid=805701225;c=1732141394839), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T22:23:18,033 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcaded9ab757b9ff3 with lease ID 0x1f4ac86eb26290ea: Processing first storage report for DS-78573738-793c-48d4-8668-2158d14870ce from datanode DatanodeRegistration(127.0.0.1:34119, datanodeUuid=add97091-1a74-4fba-98fe-43eb274f7e88, infoPort=40295, infoSecurePort=0, ipcPort=38349, storageInfo=lv=-57;cid=testClusterID;nsid=805701225;c=1732141394839) 2024-11-20T22:23:18,033 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcaded9ab757b9ff3 with lease ID 0x1f4ac86eb26290ea: from storage DS-78573738-793c-48d4-8668-2158d14870ce node DatanodeRegistration(127.0.0.1:34119, datanodeUuid=add97091-1a74-4fba-98fe-43eb274f7e88, infoPort=40295, infoSecurePort=0, ipcPort=38349, storageInfo=lv=-57;cid=testClusterID;nsid=805701225;c=1732141394839), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T22:23:18,091 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d 2024-11-20T22:23:18,201 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/cluster_4bf5016f-69f0-b7cd-6e5c-cf5b66ac688f/zookeeper_0, clientPort=51822, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/cluster_4bf5016f-69f0-b7cd-6e5c-cf5b66ac688f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/cluster_4bf5016f-69f0-b7cd-6e5c-cf5b66ac688f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T22:23:18,217 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=51822 2024-11-20T22:23:18,234 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T22:23:18,239 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T22:23:18,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741825_1001 (size=7) 2024-11-20T22:23:18,957 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a with version=8 2024-11-20T22:23:18,958 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/hbase-staging 2024-11-20T22:23:19,111 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-20T22:23:19,380 INFO [Time-limited test {}] client.ConnectionUtils(129): master/6365a1e51efd:0 server-side Connection retries=45 2024-11-20T22:23:19,395 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T22:23:19,396 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T22:23:19,396 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T22:23:19,396 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T22:23:19,396 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, 
handlerCount=1 2024-11-20T22:23:19,557 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T22:23:19,615 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-20T22:23:19,624 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-20T22:23:19,628 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T22:23:19,652 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 51506 (auto-detected) 2024-11-20T22:23:19,653 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-20T22:23:19,671 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41349 2024-11-20T22:23:19,678 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T22:23:19,680 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T22:23:19,693 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:41349 connecting to ZooKeeper ensemble=127.0.0.1:51822 2024-11-20T22:23:19,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:413490x0, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T22:23:19,763 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41349-0x1015ba1db0a0000 connected 2024-11-20T22:23:19,852 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T22:23:19,855 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T22:23:19,859 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T22:23:19,863 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41349 2024-11-20T22:23:19,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41349 2024-11-20T22:23:19,867 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41349 2024-11-20T22:23:19,868 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41349 2024-11-20T22:23:19,870 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): 
Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41349 2024-11-20T22:23:19,879 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a, hbase.cluster.distributed=false 2024-11-20T22:23:19,951 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/6365a1e51efd:0 server-side Connection retries=45 2024-11-20T22:23:19,952 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T22:23:19,952 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T22:23:19,952 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T22:23:19,953 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T22:23:19,953 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T22:23:19,955 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T22:23:19,959 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T22:23:19,960 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:44631 2024-11-20T22:23:19,962 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T22:23:19,972 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T22:23:19,974 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T22:23:19,977 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T22:23:19,982 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:44631 connecting to ZooKeeper ensemble=127.0.0.1:51822 2024-11-20T22:23:19,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:446310x0, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T22:23:19,992 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:446310x0, quorum=127.0.0.1:51822, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T22:23:19,994 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:446310x0, quorum=127.0.0.1:51822, baseZNode=/hbase Set watcher on znode that does not yet exist, 
/hbase/running 2024-11-20T22:23:19,996 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44631-0x1015ba1db0a0001 connected 2024-11-20T22:23:19,996 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44631-0x1015ba1db0a0001, quorum=127.0.0.1:51822, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T22:23:19,998 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44631 2024-11-20T22:23:19,999 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44631 2024-11-20T22:23:20,003 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44631 2024-11-20T22:23:20,007 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44631 2024-11-20T22:23:20,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44631 2024-11-20T22:23:20,014 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/6365a1e51efd,41349,1732141399102 2024-11-20T22:23:20,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44631-0x1015ba1db0a0001, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T22:23:20,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T22:23:20,033 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6365a1e51efd,41349,1732141399102 2024-11-20T22:23:20,034 DEBUG [M:0;6365a1e51efd:41349 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6365a1e51efd:41349 2024-11-20T22:23:20,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44631-0x1015ba1db0a0001, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T22:23:20,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T22:23:20,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44631-0x1015ba1db0a0001, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:20,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:20,059 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Set watcher on existing znode=/hbase/master 
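The preceding entries show the master (port 41349) and the region server (port 44631) each connecting RecoverableZooKeeper to the mini ZooKeeper ensemble on 127.0.0.1:51822 and registering watches on znodes such as /hbase/master and /hbase/running before those znodes exist. A minimal sketch of the same idea using the plain ZooKeeper client (rather than HBase's internal ZKWatcher/ZKUtil) follows; the port is the one printed by MiniZooKeeperCluster above.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkWatchSketch {
  public static void main(String[] args) throws Exception {
    // Connect to the mini ZooKeeper cluster on clientPort=51822 and watch /hbase/master.
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("event=" + event.getType() + " path=" + event.getPath());
    ZooKeeper zk = new ZooKeeper("127.0.0.1:51822", 30000, watcher);
    // exists() returns null for a znode that has not been created yet but still leaves
    // a watch behind, which is what "Set watcher on znode that does not yet exist" means.
    System.out.println("/hbase/master stat: " + zk.exists("/hbase/master", true));
    zk.close();
  }
}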
2024-11-20T22:23:20,061 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6365a1e51efd,41349,1732141399102 from backup master directory 2024-11-20T22:23:20,061 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T22:23:20,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6365a1e51efd,41349,1732141399102 2024-11-20T22:23:20,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44631-0x1015ba1db0a0001, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T22:23:20,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T22:23:20,072 WARN [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T22:23:20,072 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6365a1e51efd,41349,1732141399102 2024-11-20T22:23:20,075 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-20T22:23:20,082 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-20T22:23:20,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741826_1002 (size=42) 2024-11-20T22:23:20,174 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/hbase.id with ID: abc495db-2e1d-41e6-b972-9cd1225f96f6 2024-11-20T22:23:20,256 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T22:23:20,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:20,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44631-0x1015ba1db0a0001, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:20,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741827_1003 (size=196) 2024-11-20T22:23:20,335 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => 
{METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T22:23:20,337 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T22:23:20,354 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] 
at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:20,358 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T22:23:20,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741828_1004 (size=1189) 2024-11-20T22:23:20,809 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/data/master/store 2024-11-20T22:23:20,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741829_1005 (size=34) 2024-11-20T22:23:20,833 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
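The master:store descriptor logged above (and again at region creation) defines four column families: info with three versions, ROW_INDEX_V1 encoding, a ROWCOL bloom filter, in-memory caching and an 8 KB block size, plus proc, rs and state with a single version, ROW bloom filters and 64 KB blocks. The sketch below rebuilds the first two families with the public HBase 2.x descriptor builder API; it is an approximation for illustration, not the internal MasterRegionFactory code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    // Approximates the 'info' and 'proc' families of the descriptor printed above.
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build())
        .build();
    System.out.println(desc);
  }
}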
2024-11-20T22:23:20,834 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:23:20,835 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T22:23:20,835 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T22:23:20,835 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T22:23:20,835 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T22:23:20,835 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T22:23:20,835 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T22:23:20,836 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-20T22:23:20,838 WARN [master/6365a1e51efd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/data/master/store/.initializing 2024-11-20T22:23:20,838 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/WALs/6365a1e51efd,41349,1732141399102 2024-11-20T22:23:20,845 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-20T22:23:20,857 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6365a1e51efd%2C41349%2C1732141399102, suffix=, logDir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/WALs/6365a1e51efd,41349,1732141399102, archiveDir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/oldWALs, maxLogs=10 2024-11-20T22:23:20,888 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/WALs/6365a1e51efd,41349,1732141399102/6365a1e51efd%2C41349%2C1732141399102.1732141400863, exclude list is [], retry=0 2024-11-20T22:23:20,910 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34119,DS-1d6c4156-c0a8-4f58-babd-98f06f69b52f,DISK] 2024-11-20T22:23:20,914 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-11-20T22:23:20,950 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/WALs/6365a1e51efd,41349,1732141399102/6365a1e51efd%2C41349%2C1732141399102.1732141400863 2024-11-20T22:23:20,951 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40295:40295)] 2024-11-20T22:23:20,952 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:23:20,952 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:23:20,955 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T22:23:20,956 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T22:23:20,995 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T22:23:21,024 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T22:23:21,029 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:21,033 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T22:23:21,033 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T22:23:21,038 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T22:23:21,038 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:21,040 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:23:21,040 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T22:23:21,044 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T22:23:21,044 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:21,045 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:23:21,046 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T22:23:21,049 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T22:23:21,049 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:21,051 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:23:21,054 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T22:23:21,056 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T22:23:21,066 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T22:23:21,070 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T22:23:21,075 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T22:23:21,077 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72399452, jitterRate=0.07883590459823608}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T22:23:21,083 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-20T22:23:21,084 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T22:23:21,116 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50c812e5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:21,148 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
2024-11-20T22:23:21,161 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T22:23:21,161 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T22:23:21,164 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T22:23:21,165 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-20T22:23:21,172 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 6 msec 2024-11-20T22:23:21,172 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T22:23:21,208 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T22:23:21,226 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T22:23:21,266 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-20T22:23:21,269 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T22:23:21,271 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T22:23:21,279 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-20T22:23:21,282 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T22:23:21,287 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T22:23:21,296 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-20T22:23:21,298 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T22:23:21,308 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T22:23:21,323 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T22:23:21,333 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T22:23:21,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44631-0x1015ba1db0a0001, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T22:23:21,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T22:23:21,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44631-0x1015ba1db0a0001, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:21,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:21,350 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=6365a1e51efd,41349,1732141399102, sessionid=0x1015ba1db0a0000, setting cluster-up flag (Was=false) 2024-11-20T22:23:21,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44631-0x1015ba1db0a0001, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:21,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:21,549 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T22:23:21,553 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6365a1e51efd,41349,1732141399102 2024-11-20T22:23:21,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44631-0x1015ba1db0a0001, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:21,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:21,687 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T22:23:21,689 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6365a1e51efd,41349,1732141399102 2024-11-20T22:23:21,728 DEBUG [RS:0;6365a1e51efd:44631 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6365a1e51efd:44631 2024-11-20T22:23:21,729 INFO 
[RS:0;6365a1e51efd:44631 {}] regionserver.HRegionServer(1008): ClusterId : abc495db-2e1d-41e6-b972-9cd1225f96f6 2024-11-20T22:23:21,733 DEBUG [RS:0;6365a1e51efd:44631 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T22:23:21,764 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-11-20T22:23:21,769 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-20T22:23:21,772 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T22:23:21,777 DEBUG [RS:0;6365a1e51efd:44631 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T22:23:21,778 DEBUG [RS:0;6365a1e51efd:44631 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T22:23:21,777 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6365a1e51efd,41349,1732141399102 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T22:23:21,780 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6365a1e51efd:0, corePoolSize=5, maxPoolSize=5 2024-11-20T22:23:21,780 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6365a1e51efd:0, corePoolSize=5, maxPoolSize=5 2024-11-20T22:23:21,781 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6365a1e51efd:0, corePoolSize=5, maxPoolSize=5 2024-11-20T22:23:21,781 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6365a1e51efd:0, corePoolSize=5, maxPoolSize=5 2024-11-20T22:23:21,781 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6365a1e51efd:0, corePoolSize=10, maxPoolSize=10 2024-11-20T22:23:21,781 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:21,781 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6365a1e51efd:0, corePoolSize=2, maxPoolSize=2 2024-11-20T22:23:21,781 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_TABLE_OPERATIONS-master/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:21,782 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732141431782 2024-11-20T22:23:21,784 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T22:23:21,785 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T22:23:21,786 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-20T22:23:21,786 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-20T22:23:21,787 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T22:23:21,788 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T22:23:21,788 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T22:23:21,788 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T22:23:21,789 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-20T22:23:21,791 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T22:23:21,791 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:21,791 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T22:23:21,792 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T22:23:21,792 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T22:23:21,794 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T22:23:21,794 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T22:23:21,796 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6365a1e51efd:0:becomeActiveMaster-HFileCleaner.large.0-1732141401795,5,FailOnTimeoutGroup] 2024-11-20T22:23:21,796 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6365a1e51efd:0:becomeActiveMaster-HFileCleaner.small.0-1732141401796,5,FailOnTimeoutGroup] 2024-11-20T22:23:21,796 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:21,797 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T22:23:21,798 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
2024-11-20T22:23:21,798 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:21,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741831_1007 (size=1039) 2024-11-20T22:23:21,810 DEBUG [RS:0;6365a1e51efd:44631 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T22:23:21,811 DEBUG [RS:0;6365a1e51efd:44631 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77418855, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:21,813 DEBUG [RS:0;6365a1e51efd:44631 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a440ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6365a1e51efd/172.17.0.2:0 2024-11-20T22:23:21,817 INFO [RS:0;6365a1e51efd:44631 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-20T22:23:21,817 INFO [RS:0;6365a1e51efd:44631 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-20T22:23:21,818 DEBUG [RS:0;6365a1e51efd:44631 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-11-20T22:23:21,820 INFO [RS:0;6365a1e51efd:44631 {}] regionserver.HRegionServer(3073): reportForDuty to master=6365a1e51efd,41349,1732141399102 with isa=6365a1e51efd/172.17.0.2:44631, startcode=1732141399950 2024-11-20T22:23:21,830 DEBUG [RS:0;6365a1e51efd:44631 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T22:23:21,864 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32971, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T22:23:21,869 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41349 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:21,871 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41349 {}] master.ServerManager(486): Registering regionserver=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:21,885 DEBUG [RS:0;6365a1e51efd:44631 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a 2024-11-20T22:23:21,885 DEBUG [RS:0;6365a1e51efd:44631 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:41121 2024-11-20T22:23:21,885 DEBUG [RS:0;6365a1e51efd:44631 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-20T22:23:21,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T22:23:21,900 DEBUG [RS:0;6365a1e51efd:44631 {}] zookeeper.ZKUtil(111): regionserver:44631-0x1015ba1db0a0001, quorum=127.0.0.1:51822, baseZNode=/hbase Set watcher on existing 
znode=/hbase/rs/6365a1e51efd,44631,1732141399950 2024-11-20T22:23:21,900 WARN [RS:0;6365a1e51efd:44631 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T22:23:21,901 INFO [RS:0;6365a1e51efd:44631 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T22:23:21,901 DEBUG [RS:0;6365a1e51efd:44631 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/WALs/6365a1e51efd,44631,1732141399950 2024-11-20T22:23:21,902 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6365a1e51efd,44631,1732141399950] 2024-11-20T22:23:21,913 DEBUG [RS:0;6365a1e51efd:44631 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-20T22:23:21,923 INFO [RS:0;6365a1e51efd:44631 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T22:23:21,939 INFO [RS:0;6365a1e51efd:44631 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T22:23:21,941 INFO [RS:0;6365a1e51efd:44631 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T22:23:21,942 INFO [RS:0;6365a1e51efd:44631 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:21,943 INFO [RS:0;6365a1e51efd:44631 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-20T22:23:21,950 INFO [RS:0;6365a1e51efd:44631 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-20T22:23:21,950 DEBUG [RS:0;6365a1e51efd:44631 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:21,950 DEBUG [RS:0;6365a1e51efd:44631 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:21,950 DEBUG [RS:0;6365a1e51efd:44631 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:21,951 DEBUG [RS:0;6365a1e51efd:44631 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:21,951 DEBUG [RS:0;6365a1e51efd:44631 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:21,951 DEBUG [RS:0;6365a1e51efd:44631 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6365a1e51efd:0, corePoolSize=2, maxPoolSize=2 2024-11-20T22:23:21,951 DEBUG [RS:0;6365a1e51efd:44631 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:21,951 DEBUG [RS:0;6365a1e51efd:44631 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:21,951 DEBUG [RS:0;6365a1e51efd:44631 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:21,952 DEBUG [RS:0;6365a1e51efd:44631 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:21,952 DEBUG [RS:0;6365a1e51efd:44631 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:21,952 DEBUG [RS:0;6365a1e51efd:44631 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6365a1e51efd:0, corePoolSize=3, maxPoolSize=3 2024-11-20T22:23:21,952 DEBUG [RS:0;6365a1e51efd:44631 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0, corePoolSize=3, maxPoolSize=3 2024-11-20T22:23:21,953 INFO [RS:0;6365a1e51efd:44631 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:21,953 INFO [RS:0;6365a1e51efd:44631 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:21,953 INFO [RS:0;6365a1e51efd:44631 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:21,953 INFO [RS:0;6365a1e51efd:44631 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:21,953 INFO [RS:0;6365a1e51efd:44631 {}] hbase.ChoreService(168): Chore ScheduledChore name=6365a1e51efd,44631,1732141399950-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-20T22:23:21,975 INFO [RS:0;6365a1e51efd:44631 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T22:23:21,977 INFO [RS:0;6365a1e51efd:44631 {}] hbase.ChoreService(168): Chore ScheduledChore name=6365a1e51efd,44631,1732141399950-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:22,005 INFO [RS:0;6365a1e51efd:44631 {}] regionserver.Replication(204): 6365a1e51efd,44631,1732141399950 started 2024-11-20T22:23:22,005 INFO [RS:0;6365a1e51efd:44631 {}] regionserver.HRegionServer(1767): Serving as 6365a1e51efd,44631,1732141399950, RpcServer on 6365a1e51efd/172.17.0.2:44631, sessionid=0x1015ba1db0a0001 2024-11-20T22:23:22,007 DEBUG [RS:0;6365a1e51efd:44631 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T22:23:22,007 DEBUG [RS:0;6365a1e51efd:44631 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:22,007 DEBUG [RS:0;6365a1e51efd:44631 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6365a1e51efd,44631,1732141399950' 2024-11-20T22:23:22,007 DEBUG [RS:0;6365a1e51efd:44631 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T22:23:22,011 DEBUG [RS:0;6365a1e51efd:44631 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T22:23:22,012 DEBUG [RS:0;6365a1e51efd:44631 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T22:23:22,012 DEBUG [RS:0;6365a1e51efd:44631 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T22:23:22,012 DEBUG [RS:0;6365a1e51efd:44631 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:22,012 DEBUG [RS:0;6365a1e51efd:44631 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6365a1e51efd,44631,1732141399950' 2024-11-20T22:23:22,012 DEBUG [RS:0;6365a1e51efd:44631 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T22:23:22,013 DEBUG [RS:0;6365a1e51efd:44631 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T22:23:22,014 DEBUG [RS:0;6365a1e51efd:44631 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T22:23:22,014 INFO [RS:0;6365a1e51efd:44631 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T22:23:22,014 INFO [RS:0;6365a1e51efd:44631 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-20T22:23:22,120 INFO [RS:0;6365a1e51efd:44631 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-20T22:23:22,124 INFO [RS:0;6365a1e51efd:44631 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6365a1e51efd%2C44631%2C1732141399950, suffix=, logDir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/WALs/6365a1e51efd,44631,1732141399950, archiveDir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/oldWALs, maxLogs=32 2024-11-20T22:23:22,142 DEBUG [RS:0;6365a1e51efd:44631 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/WALs/6365a1e51efd,44631,1732141399950/6365a1e51efd%2C44631%2C1732141399950.1732141402127, exclude list is [], retry=0 2024-11-20T22:23:22,148 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34119,DS-1d6c4156-c0a8-4f58-babd-98f06f69b52f,DISK] 2024-11-20T22:23:22,151 INFO [RS:0;6365a1e51efd:44631 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/WALs/6365a1e51efd,44631,1732141399950/6365a1e51efd%2C44631%2C1732141399950.1732141402127 2024-11-20T22:23:22,153 DEBUG [RS:0;6365a1e51efd:44631 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40295:40295)] 2024-11-20T22:23:22,204 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-20T22:23:22,204 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a 2024-11-20T22:23:22,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741833_1009 (size=32) 2024-11-20T22:23:22,625 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:23:22,628 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T22:23:22,631 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T22:23:22,632 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:22,633 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T22:23:22,633 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T22:23:22,636 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T22:23:22,636 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:22,637 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T22:23:22,637 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T22:23:22,640 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T22:23:22,640 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:22,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T22:23:22,643 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/meta/1588230740 2024-11-20T22:23:22,643 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/meta/1588230740 2024-11-20T22:23:22,647 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-20T22:23:22,650 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-20T22:23:22,655 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T22:23:22,656 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67060997, jitterRate=-7.132738828659058E-4}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T22:23:22,659 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-20T22:23:22,659 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-20T22:23:22,659 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-20T22:23:22,659 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-20T22:23:22,659 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T22:23:22,659 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T22:23:22,660 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-20T22:23:22,661 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-20T22:23:22,663 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-20T22:23:22,663 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-20T22:23:22,669 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T22:23:22,676 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T22:23:22,679 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T22:23:22,832 DEBUG [6365a1e51efd:41349 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T22:23:22,841 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:22,847 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6365a1e51efd,44631,1732141399950, state=OPENING 2024-11-20T22:23:22,857 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T22:23:22,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44631-0x1015ba1db0a0001, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:22,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:22,867 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T22:23:22,867 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T22:23:22,869 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=6365a1e51efd,44631,1732141399950}] 2024-11-20T22:23:23,041 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:23,042 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T22:23:23,045 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48268, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T22:23:23,056 INFO [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-20T22:23:23,057 INFO [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T22:23:23,058 INFO [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-20T22:23:23,061 INFO [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6365a1e51efd%2C44631%2C1732141399950.meta, suffix=.meta, logDir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/WALs/6365a1e51efd,44631,1732141399950, archiveDir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/oldWALs, maxLogs=32 2024-11-20T22:23:23,079 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/WALs/6365a1e51efd,44631,1732141399950/6365a1e51efd%2C44631%2C1732141399950.meta.1732141403064.meta, exclude list is [], retry=0 2024-11-20T22:23:23,083 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34119,DS-1d6c4156-c0a8-4f58-babd-98f06f69b52f,DISK] 2024-11-20T22:23:23,086 INFO [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/WALs/6365a1e51efd,44631,1732141399950/6365a1e51efd%2C44631%2C1732141399950.meta.1732141403064.meta 2024-11-20T22:23:23,086 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with 
pipeline: [(127.0.0.1/127.0.0.1:40295:40295)] 2024-11-20T22:23:23,087 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:23:23,088 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T22:23:23,139 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T22:23:23,144 INFO [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-20T22:23:23,148 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T22:23:23,148 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:23:23,148 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-20T22:23:23,148 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-20T22:23:23,152 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T22:23:23,154 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T22:23:23,154 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:23,155 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T22:23:23,156 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T22:23:23,158 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T22:23:23,158 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:23,159 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T22:23:23,159 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T22:23:23,161 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T22:23:23,161 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:23,162 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T22:23:23,164 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/meta/1588230740 2024-11-20T22:23:23,166 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/meta/1588230740 2024-11-20T22:23:23,169 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T22:23:23,172 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-20T22:23:23,174 INFO [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72623207, jitterRate=0.08217011392116547}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T22:23:23,175 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-20T22:23:23,182 INFO [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732141403036 2024-11-20T22:23:23,192 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T22:23:23,192 INFO [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-20T22:23:23,193 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:23,196 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6365a1e51efd,44631,1732141399950, state=OPEN 2024-11-20T22:23:23,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44631-0x1015ba1db0a0001, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T22:23:23,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T22:23:23,231 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T22:23:23,231 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T22:23:23,236 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T22:23:23,237 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=6365a1e51efd,44631,1732141399950 in 362 msec 2024-11-20T22:23:23,245 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T22:23:23,245 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure 
table=hbase:meta, region=1588230740, ASSIGN in 570 msec 2024-11-20T22:23:23,251 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.5270 sec 2024-11-20T22:23:23,251 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732141403251, completionTime=-1 2024-11-20T22:23:23,251 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T22:23:23,251 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-20T22:23:23,290 DEBUG [hconnection-0x42e0417a-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:23,294 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48270, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:23,304 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-20T22:23:23,304 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732141463304 2024-11-20T22:23:23,305 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732141523305 2024-11-20T22:23:23,305 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 53 msec 2024-11-20T22:23:23,344 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6365a1e51efd,41349,1732141399102-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:23,344 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6365a1e51efd,41349,1732141399102-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:23,345 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6365a1e51efd,41349,1732141399102-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:23,346 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6365a1e51efd:41349, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:23,347 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:23,354 DEBUG [master/6365a1e51efd:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-20T22:23:23,355 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-11-20T22:23:23,357 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T22:23:23,364 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-20T22:23:23,367 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T22:23:23,368 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:23,371 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T22:23:23,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741835_1011 (size=358) 2024-11-20T22:23:23,787 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 451355caa9251e00fdfd2d0a5e7a8871, NAME => 'hbase:namespace,,1732141403356.451355caa9251e00fdfd2d0a5e7a8871.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a 2024-11-20T22:23:23,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741836_1012 (size=42) 2024-11-20T22:23:24,203 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732141403356.451355caa9251e00fdfd2d0a5e7a8871.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:23:24,203 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 451355caa9251e00fdfd2d0a5e7a8871, disabling compactions & flushes 2024-11-20T22:23:24,203 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732141403356.451355caa9251e00fdfd2d0a5e7a8871. 2024-11-20T22:23:24,204 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732141403356.451355caa9251e00fdfd2d0a5e7a8871. 2024-11-20T22:23:24,204 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732141403356.451355caa9251e00fdfd2d0a5e7a8871. 
after waiting 0 ms 2024-11-20T22:23:24,204 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732141403356.451355caa9251e00fdfd2d0a5e7a8871. 2024-11-20T22:23:24,204 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1732141403356.451355caa9251e00fdfd2d0a5e7a8871. 2024-11-20T22:23:24,205 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 451355caa9251e00fdfd2d0a5e7a8871: 2024-11-20T22:23:24,208 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T22:23:24,216 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1732141403356.451355caa9251e00fdfd2d0a5e7a8871.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1732141404209"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732141404209"}]},"ts":"1732141404209"} 2024-11-20T22:23:24,243 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T22:23:24,245 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T22:23:24,249 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141404245"}]},"ts":"1732141404245"} 2024-11-20T22:23:24,253 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-20T22:23:24,275 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=451355caa9251e00fdfd2d0a5e7a8871, ASSIGN}] 2024-11-20T22:23:24,278 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=451355caa9251e00fdfd2d0a5e7a8871, ASSIGN 2024-11-20T22:23:24,280 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=451355caa9251e00fdfd2d0a5e7a8871, ASSIGN; state=OFFLINE, location=6365a1e51efd,44631,1732141399950; forceNewPlan=false, retain=false 2024-11-20T22:23:24,431 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=451355caa9251e00fdfd2d0a5e7a8871, regionState=OPENING, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:24,435 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 451355caa9251e00fdfd2d0a5e7a8871, server=6365a1e51efd,44631,1732141399950}] 2024-11-20T22:23:24,590 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:24,597 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1732141403356.451355caa9251e00fdfd2d0a5e7a8871. 2024-11-20T22:23:24,597 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 451355caa9251e00fdfd2d0a5e7a8871, NAME => 'hbase:namespace,,1732141403356.451355caa9251e00fdfd2d0a5e7a8871.', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:23:24,598 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 451355caa9251e00fdfd2d0a5e7a8871 2024-11-20T22:23:24,598 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732141403356.451355caa9251e00fdfd2d0a5e7a8871.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:23:24,598 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 451355caa9251e00fdfd2d0a5e7a8871 2024-11-20T22:23:24,598 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 451355caa9251e00fdfd2d0a5e7a8871 2024-11-20T22:23:24,601 INFO [StoreOpener-451355caa9251e00fdfd2d0a5e7a8871-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 451355caa9251e00fdfd2d0a5e7a8871 2024-11-20T22:23:24,603 INFO [StoreOpener-451355caa9251e00fdfd2d0a5e7a8871-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 451355caa9251e00fdfd2d0a5e7a8871 columnFamilyName info 2024-11-20T22:23:24,603 DEBUG [StoreOpener-451355caa9251e00fdfd2d0a5e7a8871-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:24,604 INFO [StoreOpener-451355caa9251e00fdfd2d0a5e7a8871-1 {}] regionserver.HStore(327): Store=451355caa9251e00fdfd2d0a5e7a8871/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:23:24,606 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/namespace/451355caa9251e00fdfd2d0a5e7a8871 2024-11-20T22:23:24,607 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/namespace/451355caa9251e00fdfd2d0a5e7a8871 2024-11-20T22:23:24,611 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 451355caa9251e00fdfd2d0a5e7a8871 2024-11-20T22:23:24,615 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/namespace/451355caa9251e00fdfd2d0a5e7a8871/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T22:23:24,616 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 451355caa9251e00fdfd2d0a5e7a8871; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67154577, jitterRate=6.811767816543579E-4}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T22:23:24,617 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 451355caa9251e00fdfd2d0a5e7a8871: 2024-11-20T22:23:24,619 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1732141403356.451355caa9251e00fdfd2d0a5e7a8871., pid=6, masterSystemTime=1732141404590 2024-11-20T22:23:24,622 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1732141403356.451355caa9251e00fdfd2d0a5e7a8871. 2024-11-20T22:23:24,622 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1732141403356.451355caa9251e00fdfd2d0a5e7a8871. 
2024-11-20T22:23:24,623 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=451355caa9251e00fdfd2d0a5e7a8871, regionState=OPEN, openSeqNum=2, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:24,632 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T22:23:24,633 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 451355caa9251e00fdfd2d0a5e7a8871, server=6365a1e51efd,44631,1732141399950 in 192 msec 2024-11-20T22:23:24,637 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T22:23:24,637 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=451355caa9251e00fdfd2d0a5e7a8871, ASSIGN in 357 msec 2024-11-20T22:23:24,639 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T22:23:24,639 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141404639"}]},"ts":"1732141404639"} 2024-11-20T22:23:24,642 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-20T22:23:24,656 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T22:23:24,660 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.2990 sec 2024-11-20T22:23:24,669 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-20T22:23:24,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44631-0x1015ba1db0a0001, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:24,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-20T22:23:24,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:24,707 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-20T22:23:24,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-20T22:23:24,747 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 41 msec 2024-11-20T22:23:24,752 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-20T22:23:24,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-20T22:23:24,791 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 39 msec 2024-11-20T22:23:24,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-20T22:23:24,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-20T22:23:24,829 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.756sec 2024-11-20T22:23:24,831 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T22:23:24,832 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T22:23:24,833 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T22:23:24,834 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-20T22:23:24,834 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T22:23:24,834 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6365a1e51efd,41349,1732141399102-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T22:23:24,835 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6365a1e51efd,41349,1732141399102-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T22:23:24,842 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-20T22:23:24,842 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T22:23:24,843 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6365a1e51efd,41349,1732141399102-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-20T22:23:24,933 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a3c3fb3 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@560d619d 2024-11-20T22:23:24,934 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-20T22:23:24,952 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e64d5c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:24,960 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-20T22:23:24,960 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-20T22:23:24,985 DEBUG [hconnection-0x5b22ea21-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:24,999 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48282, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:25,011 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=6365a1e51efd,41349,1732141399102 2024-11-20T22:23:25,027 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=458, ProcessCount=11, AvailableMemoryMB=2648 2024-11-20T22:23:25,058 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T22:23:25,064 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53060, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T22:23:25,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
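[Editorial aside] The TableDescriptorChecker warning above is driven by the effective value of "hbase.hregion.memstore.flush.size" (131072 bytes in this run). The following is a minimal, hypothetical sketch of how a test configuration could end up with such a small flush size; it is not taken from this test's source, only the property name and the logged value are real.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallFlushSizeConfig {
  // Hypothetical illustration only: the warning above fires when the effective
  // "hbase.hregion.memstore.flush.size" is very small (131072 bytes here).
  // A test would set it on the configuration before starting the minicluster,
  // which forces very frequent memstore flushes.
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 131072, as logged
    return conf;
  }
}
```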
2024-11-20T22:23:25,077 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T22:23:25,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T22:23:25,085 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T22:23:25,091 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:25,091 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-20T22:23:25,093 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T22:23:25,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T22:23:25,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741837_1013 (size=960) 2024-11-20T22:23:25,122 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a 2024-11-20T22:23:25,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741838_1014 (size=53) 2024-11-20T22:23:25,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T22:23:25,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T22:23:25,540 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:23:25,540 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 36a256a4871d36dc6632cf0cdb971cbb, disabling compactions & flushes 2024-11-20T22:23:25,540 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:25,540 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:25,540 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. after waiting 0 ms 2024-11-20T22:23:25,540 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:25,540 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:25,540 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:25,543 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T22:23:25,543 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732141405543"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732141405543"}]},"ts":"1732141405543"} 2024-11-20T22:23:25,547 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-11-20T22:23:25,549 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T22:23:25,549 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141405549"}]},"ts":"1732141405549"} 2024-11-20T22:23:25,552 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T22:23:25,583 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=36a256a4871d36dc6632cf0cdb971cbb, ASSIGN}] 2024-11-20T22:23:25,585 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=36a256a4871d36dc6632cf0cdb971cbb, ASSIGN 2024-11-20T22:23:25,586 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=36a256a4871d36dc6632cf0cdb971cbb, ASSIGN; state=OFFLINE, location=6365a1e51efd,44631,1732141399950; forceNewPlan=false, retain=false 2024-11-20T22:23:25,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T22:23:25,738 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=36a256a4871d36dc6632cf0cdb971cbb, regionState=OPENING, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:25,747 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950}] 2024-11-20T22:23:25,903 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:25,911 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:25,911 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:23:25,912 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:25,912 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:23:25,912 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:25,913 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:25,916 INFO [StoreOpener-36a256a4871d36dc6632cf0cdb971cbb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:25,919 INFO [StoreOpener-36a256a4871d36dc6632cf0cdb971cbb-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:23:25,920 INFO [StoreOpener-36a256a4871d36dc6632cf0cdb971cbb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 36a256a4871d36dc6632cf0cdb971cbb columnFamilyName A 2024-11-20T22:23:25,920 DEBUG [StoreOpener-36a256a4871d36dc6632cf0cdb971cbb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:25,921 INFO [StoreOpener-36a256a4871d36dc6632cf0cdb971cbb-1 {}] regionserver.HStore(327): Store=36a256a4871d36dc6632cf0cdb971cbb/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:23:25,921 INFO [StoreOpener-36a256a4871d36dc6632cf0cdb971cbb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:25,924 INFO [StoreOpener-36a256a4871d36dc6632cf0cdb971cbb-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:23:25,924 INFO [StoreOpener-36a256a4871d36dc6632cf0cdb971cbb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 36a256a4871d36dc6632cf0cdb971cbb columnFamilyName B 2024-11-20T22:23:25,925 DEBUG [StoreOpener-36a256a4871d36dc6632cf0cdb971cbb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:25,925 INFO [StoreOpener-36a256a4871d36dc6632cf0cdb971cbb-1 {}] regionserver.HStore(327): Store=36a256a4871d36dc6632cf0cdb971cbb/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:23:25,926 INFO [StoreOpener-36a256a4871d36dc6632cf0cdb971cbb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:25,928 INFO [StoreOpener-36a256a4871d36dc6632cf0cdb971cbb-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:23:25,928 INFO [StoreOpener-36a256a4871d36dc6632cf0cdb971cbb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 36a256a4871d36dc6632cf0cdb971cbb columnFamilyName C 2024-11-20T22:23:25,928 DEBUG [StoreOpener-36a256a4871d36dc6632cf0cdb971cbb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:25,929 INFO [StoreOpener-36a256a4871d36dc6632cf0cdb971cbb-1 {}] regionserver.HStore(327): Store=36a256a4871d36dc6632cf0cdb971cbb/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:23:25,929 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:25,931 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:25,931 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:25,934 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T22:23:25,936 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:25,939 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T22:23:25,940 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 36a256a4871d36dc6632cf0cdb971cbb; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59883788, jitterRate=-0.10766202211380005}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T22:23:25,941 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:25,942 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., pid=11, masterSystemTime=1732141405902 2024-11-20T22:23:25,945 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:25,945 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:25,946 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=36a256a4871d36dc6632cf0cdb971cbb, regionState=OPEN, openSeqNum=2, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:25,956 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-20T22:23:25,958 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 in 204 msec 2024-11-20T22:23:25,962 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-20T22:23:25,962 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=36a256a4871d36dc6632cf0cdb971cbb, ASSIGN in 373 msec 2024-11-20T22:23:25,964 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T22:23:25,964 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141405964"}]},"ts":"1732141405964"} 2024-11-20T22:23:25,967 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T22:23:26,009 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T22:23:26,013 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 933 msec 2024-11-20T22:23:26,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T22:23:26,216 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-20T22:23:26,223 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e59596a to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@30640414 2024-11-20T22:23:26,238 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36ea98cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:26,241 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:26,243 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48296, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:26,246 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T22:23:26,249 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53076, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T22:23:26,257 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2cac4303 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@536a4a58 2024-11-20T22:23:26,267 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b3a6cb4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:26,269 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x39b10898 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3888ad7c 2024-11-20T22:23:26,281 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29b132d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:26,283 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d7115de to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2dd0bbda 2024-11-20T22:23:26,292 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@dd77b4a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:26,294 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x30d4d4c6 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18f2a76d 2024-11-20T22:23:26,309 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a33c837, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:26,311 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x054c943d to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@435176b2 2024-11-20T22:23:26,322 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37577c9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:26,324 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f0c7188 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4e957ecd 2024-11-20T22:23:26,347 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37950159, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:26,348 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x475ca0f4 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22daddc4 2024-11-20T22:23:26,359 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d5a9f0f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:26,361 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x50c9c1d1 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39028e20 2024-11-20T22:23:26,372 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d4c9c1c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:26,375 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4f1331a9 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@624dc5e5 2024-11-20T22:23:26,392 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bb819cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:26,398 DEBUG [hconnection-0x59c3390f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:26,399 DEBUG [hconnection-0x48af454-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:26,399 DEBUG [hconnection-0xb857c1a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:26,400 DEBUG [hconnection-0x7b5d524b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:26,402 DEBUG [hconnection-0x17b3816e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:26,403 DEBUG [hconnection-0x23bc3a57-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:26,403 DEBUG [hconnection-0x134cb240-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:26,405 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48308, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:26,406 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48318, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-11-20T22:23:26,406 DEBUG [hconnection-0x4a22ee9a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:26,408 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48334, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:26,410 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48350, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:26,411 DEBUG [hconnection-0x2ce6d49-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:26,412 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:23:26,414 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48362, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:26,414 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48374, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:26,420 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48376, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:26,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-20T22:23:26,424 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48380, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:26,425 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:23:26,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T22:23:26,427 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:23:26,429 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:23:26,432 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48392, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:26,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T22:23:26,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:26,578 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T22:23:26,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:26,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:26,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:26,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:26,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:26,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:26,594 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:26,596 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T22:23:26,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:26,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:26,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:26,610 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
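
The records above show two flushes colliding on region 36a256a4871d36dc6632cf0cdb971cbb: the memstore-pressure flush started by MemStoreFlusher.0 and the admin-requested flush driven by FlushTableProcedure pid=12, whose FlushRegionProcedure/FlushRegionCallable (pid=13) is rejected with "Unable to complete flush ... as already flushing" while the first flush is still running. As a rough sketch of the client-side call that produces the "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" request, the following uses the standard Admin API; the class name is invented, and the ZooKeeper quorum, port, and session timeout are copied from the ReadOnlyZKClient line above, so they only apply to this particular mini-cluster run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Connection details mirrored from the ReadOnlyZKClient record above
    // (assumption: the mini-cluster's ZooKeeper ensemble is 127.0.0.1:51822).
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "51822");
    conf.setInt("zookeeper.session.timeout", 90000);

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Triggers the master-side FlushTableProcedure (pid=12 in the log),
      // which fans out a FlushRegionProcedure (pid=13) to the hosting
      // region server.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
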
2024-11-20T22:23:26,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:26,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:26,726 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:26,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141466711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:26,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:26,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141466712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:26,731 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:26,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141466717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:26,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T22:23:26,738 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:26,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141466727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:26,739 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:26,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141466727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:26,781 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/80c90a30c2fb49c6b7f4eb3b7c2455e4 is 50, key is test_row_0/A:col10/1732141406456/Put/seqid=0 2024-11-20T22:23:26,800 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:26,803 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T22:23:26,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:26,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:26,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:26,806 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:26,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:26,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741839_1015 (size=14341) 2024-11-20T22:23:26,810 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/80c90a30c2fb49c6b7f4eb3b7c2455e4 2024-11-20T22:23:26,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:26,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:26,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141466864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:26,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:26,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141466864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:26,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:26,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141466864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:26,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:26,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141466863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:26,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:26,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141466866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:26,951 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/2415d35151b543e8bc9dbb15405334cd is 50, key is test_row_0/B:col10/1732141406456/Put/seqid=0 2024-11-20T22:23:26,968 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:26,969 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T22:23:26,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:26,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:26,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
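
The RegionTooBusyException entries above come from HRegion.checkResources rejecting Mutate calls while the region's memstore is above its blocking limit (512.0 K here; that limit is the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier, so the test presumably runs with a much smaller flush size than the default). Writers are expected to back off and retry until the in-flight flush drains the memstore. The sketch below is an illustrative retry loop, not code taken from the test: the row, family, and qualifier mirror the keys in the log, the value and timing constants are made up, and depending on client retry settings the exception may instead surface wrapped in a retries-exhausted exception rather than directly as shown.

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  // Writes one cell shaped like the test's keys (row test_row_0, family A,
  // qualifier col10; the value is illustrative), retrying with exponential
  // backoff while the region reports it is blocked on memstore size.
  static void putWithBackoff(Connection connection)
      throws IOException, InterruptedException {
    Put put = new Put(Bytes.toBytes("test_row_0"))
        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
    try (Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long backoffMs = 100;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);
          return; // write accepted
        } catch (RegionTooBusyException e) {
          // Region is blocked until MemStoreFlusher catches up; wait and retry.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000L);
        }
      }
      throw new IOException("region still too busy after retries");
    }
  }
}
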
2024-11-20T22:23:26,985 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:26,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:26,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:27,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741840_1016 (size=12001) 2024-11-20T22:23:27,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T22:23:27,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:27,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141467078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:27,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:27,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141467078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:27,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:27,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:27,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141467081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:27,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141467080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:27,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:27,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141467078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:27,140 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:27,141 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T22:23:27,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:27,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:27,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:27,142 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:27,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:27,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:27,296 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:27,300 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T22:23:27,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:27,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:27,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:27,302 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:27,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:27,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:27,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:27,395 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:27,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141467391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:27,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141467393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:27,396 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:27,396 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:27,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141467393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:27,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141467393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:27,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:27,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141467393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:27,430 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/2415d35151b543e8bc9dbb15405334cd 2024-11-20T22:23:27,459 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:27,460 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T22:23:27,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:27,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:27,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:27,461 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:27,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:27,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:27,517 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/870458fbd0864dca996687d25b9667cf is 50, key is test_row_0/C:col10/1732141406456/Put/seqid=0 2024-11-20T22:23:27,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T22:23:27,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741841_1017 (size=12001) 2024-11-20T22:23:27,576 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/870458fbd0864dca996687d25b9667cf 2024-11-20T22:23:27,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/80c90a30c2fb49c6b7f4eb3b7c2455e4 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/80c90a30c2fb49c6b7f4eb3b7c2455e4 2024-11-20T22:23:27,621 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:27,623 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T22:23:27,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:27,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:27,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:27,625 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:27,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:27,626 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/80c90a30c2fb49c6b7f4eb3b7c2455e4, entries=200, sequenceid=15, filesize=14.0 K 2024-11-20T22:23:27,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:27,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/2415d35151b543e8bc9dbb15405334cd as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/2415d35151b543e8bc9dbb15405334cd 2024-11-20T22:23:27,673 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/2415d35151b543e8bc9dbb15405334cd, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T22:23:27,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/870458fbd0864dca996687d25b9667cf as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/870458fbd0864dca996687d25b9667cf 2024-11-20T22:23:27,702 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/870458fbd0864dca996687d25b9667cf, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T22:23:27,705 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 36a256a4871d36dc6632cf0cdb971cbb in 1127ms, sequenceid=15, compaction requested=false
2024-11-20T22:23:27,707 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T22:23:27,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:27,783 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:27,784 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T22:23:27,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:27,785 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T22:23:27,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:27,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:27,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:27,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:27,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:27,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:27,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/1a7462b665f741b5982270fe0212ebc6 is 50, key is test_row_0/A:col10/1732141406710/Put/seqid=0 2024-11-20T22:23:27,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741842_1018 (size=12001) 2024-11-20T22:23:27,902 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/1a7462b665f741b5982270fe0212ebc6
2024-11-20T22:23:27,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:27,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:27,917 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T22:23:27,918 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-20T22:23:27,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/0b221af380ae426ea963d7551ea2de0a is 50, key is test_row_0/B:col10/1732141406710/Put/seqid=0 2024-11-20T22:23:27,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741843_1019 (size=12001) 2024-11-20T22:23:27,965 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/0b221af380ae426ea963d7551ea2de0a 2024-11-20T22:23:27,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:27,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141467949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:27,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:27,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141467958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:27,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:27,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141467961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:27,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:27,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:27,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141467968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:27,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141467968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/76551996944a49ba83a86b26521d0c4c is 50, key is test_row_0/C:col10/1732141406710/Put/seqid=0 2024-11-20T22:23:28,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741844_1020 (size=12001) 2024-11-20T22:23:28,048 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/76551996944a49ba83a86b26521d0c4c 2024-11-20T22:23:28,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/1a7462b665f741b5982270fe0212ebc6 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/1a7462b665f741b5982270fe0212ebc6 2024-11-20T22:23:28,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141468086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141468087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141468090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141468091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,095 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141468092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,106 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/1a7462b665f741b5982270fe0212ebc6, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T22:23:28,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/0b221af380ae426ea963d7551ea2de0a as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0b221af380ae426ea963d7551ea2de0a 2024-11-20T22:23:28,128 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0b221af380ae426ea963d7551ea2de0a, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T22:23:28,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/76551996944a49ba83a86b26521d0c4c as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/76551996944a49ba83a86b26521d0c4c 2024-11-20T22:23:28,152 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/76551996944a49ba83a86b26521d0c4c, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T22:23:28,155 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 36a256a4871d36dc6632cf0cdb971cbb in 370ms, sequenceid=38, 
compaction requested=false 2024-11-20T22:23:28,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:28,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:28,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-20T22:23:28,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-20T22:23:28,164 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-20T22:23:28,165 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7290 sec 2024-11-20T22:23:28,172 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.7520 sec 2024-11-20T22:23:28,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:28,302 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T22:23:28,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:28,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:28,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:28,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:28,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:28,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:28,325 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/d5d04779900b4329886724b28a0b77b6 is 50, key is test_row_0/A:col10/1732141408300/Put/seqid=0 2024-11-20T22:23:28,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741845_1021 (size=16681) 2024-11-20T22:23:28,355 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/d5d04779900b4329886724b28a0b77b6 2024-11-20T22:23:28,386 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141468364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141468373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,394 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141468373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141468379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141468376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,403 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/c01a093f53524a9881358c83a1181ab3 is 50, key is test_row_0/B:col10/1732141408300/Put/seqid=0 2024-11-20T22:23:28,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741846_1022 (size=12001) 2024-11-20T22:23:28,464 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/c01a093f53524a9881358c83a1181ab3 2024-11-20T22:23:28,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141468490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,502 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/c7a73ecdd9dc4ff8aaba2aa5b767facb is 50, key is test_row_0/C:col10/1732141408300/Put/seqid=0 2024-11-20T22:23:28,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141468497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141468500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141468504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,515 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141468512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741847_1023 (size=12001) 2024-11-20T22:23:28,538 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/c7a73ecdd9dc4ff8aaba2aa5b767facb 2024-11-20T22:23:28,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T22:23:28,555 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-20T22:23:28,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/d5d04779900b4329886724b28a0b77b6 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d5d04779900b4329886724b28a0b77b6 2024-11-20T22:23:28,560 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:23:28,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-20T22:23:28,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T22:23:28,571 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:23:28,572 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d5d04779900b4329886724b28a0b77b6, entries=250, sequenceid=53, filesize=16.3 K 2024-11-20T22:23:28,574 INFO [PEWorker-3 {}] 
procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:23:28,574 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:23:28,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/c01a093f53524a9881358c83a1181ab3 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/c01a093f53524a9881358c83a1181ab3 2024-11-20T22:23:28,594 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/c01a093f53524a9881358c83a1181ab3, entries=150, sequenceid=53, filesize=11.7 K 2024-11-20T22:23:28,597 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/c7a73ecdd9dc4ff8aaba2aa5b767facb as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/c7a73ecdd9dc4ff8aaba2aa5b767facb 2024-11-20T22:23:28,614 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/c7a73ecdd9dc4ff8aaba2aa5b767facb, entries=150, sequenceid=53, filesize=11.7 K 2024-11-20T22:23:28,622 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 36a256a4871d36dc6632cf0cdb971cbb in 319ms, sequenceid=53, compaction requested=true 2024-11-20T22:23:28,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:28,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:23:28,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:28,639 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:23:28,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:23:28,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:28,639 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:23:28,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:23:28,640 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:23:28,643 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43023 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:23:28,644 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/A is initiating minor compaction (all files) 2024-11-20T22:23:28,644 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/A in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:28,645 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/80c90a30c2fb49c6b7f4eb3b7c2455e4, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/1a7462b665f741b5982270fe0212ebc6, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d5d04779900b4329886724b28a0b77b6] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=42.0 K 2024-11-20T22:23:28,647 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:23:28,647 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/B is initiating minor compaction (all files) 2024-11-20T22:23:28,647 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/B in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
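The flush that finished at sequenceid=53 left each store with three HFiles, so MemStoreFlusher marks A, B and C for compaction and ExploringCompactionPolicy selects all three files per store (42.0 K for A, 35.2 K for B). The same kind of compaction can also be requested and observed from a client through the Admin API; the sketch below is hypothetical client code, not part of this test run — only the table name is taken from the log, while the connection setup and polling interval are assumptions.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();          // reads hbase-site.xml from the classpath
    TableName table = TableName.valueOf("TestAcidGuarantees"); // table name taken from the log
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the region servers to queue a (minor) compaction for every region of the table.
      admin.compact(table);
      // Poll until no compaction is reported for the table any more.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(500);
      }
    }
  }
}
```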
2024-11-20T22:23:28,648 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/2415d35151b543e8bc9dbb15405334cd, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0b221af380ae426ea963d7551ea2de0a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/c01a093f53524a9881358c83a1181ab3] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=35.2 K 2024-11-20T22:23:28,649 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 2415d35151b543e8bc9dbb15405334cd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732141406456 2024-11-20T22:23:28,651 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80c90a30c2fb49c6b7f4eb3b7c2455e4, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732141406456 2024-11-20T22:23:28,652 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b221af380ae426ea963d7551ea2de0a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732141406709 2024-11-20T22:23:28,652 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a7462b665f741b5982270fe0212ebc6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732141406709 2024-11-20T22:23:28,653 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting c01a093f53524a9881358c83a1181ab3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732141407948 2024-11-20T22:23:28,653 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting d5d04779900b4329886724b28a0b77b6, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732141407948 2024-11-20T22:23:28,665 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T22:23:28,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T22:23:28,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:28,724 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T22:23:28,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:28,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:28,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:28,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:28,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:28,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:28,732 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#B#compaction#10 average throughput is 0.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:28,733 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/97f37a19e7834d40bd73ef845c3c23de is 50, key is test_row_0/B:col10/1732141408300/Put/seqid=0 2024-11-20T22:23:28,734 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T22:23:28,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:28,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:28,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:28,736 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
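At this point the master has dispatched FlushRegionProcedure pid=15 to the region server while MemStoreFlusher.0 is still writing out the previous snapshot, so FlushRegionCallable reports "NOT flushing ... as already flushing" and fails with the IOException above; the sub-procedure is dispatched again a little later (22:23:28,895 below). The flush itself appears to be driven from the test client through HBaseAdmin, as the "Operation: FLUSH ... procId: 12 completed" entry above suggests. A minimal sketch of such a request, assuming a default client configuration (only the table name comes from the log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; on the master this becomes a
      // FlushTableProcedure with one FlushRegionProcedure per region, as in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```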
2024-11-20T22:23:28,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:28,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:28,742 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#A#compaction#9 average throughput is 0.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:28,743 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/8ff1cec0656141a2a0b2e1408096ec43 is 50, key is test_row_0/A:col10/1732141408300/Put/seqid=0 2024-11-20T22:23:28,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741848_1024 (size=12104) 2024-11-20T22:23:28,817 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/c814267dee164e148335994d2b891a2f is 50, key is test_row_0/A:col10/1732141408375/Put/seqid=0 2024-11-20T22:23:28,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741849_1025 (size=12104) 2024-11-20T22:23:28,841 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/8ff1cec0656141a2a0b2e1408096ec43 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/8ff1cec0656141a2a0b2e1408096ec43 2024-11-20T22:23:28,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741850_1026 (size=12001) 2024-11-20T22:23:28,852 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/c814267dee164e148335994d2b891a2f 2024-11-20T22:23:28,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141468833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141468838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141468840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T22:23:28,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141468848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,872 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141468848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,888 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/A of 36a256a4871d36dc6632cf0cdb971cbb into 8ff1cec0656141a2a0b2e1408096ec43(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
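Most of the volume in this excerpt is the RegionTooBusyException raised by HRegion.checkResources: the test's writers push the region's memstore past its blocking limit (512.0 K here) faster than the flusher drains it, so each Mutate is rejected and every handler logs the same stack trace. The stock client retries such failures internally (subject to hbase.client.retries.number); the sketch below spells the back-off-and-retry pattern out as hypothetical application code — the row, family and qualifier are taken from the log, while the retry count and sleep times are assumptions.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (IOException e) {
          // Typically a RegionTooBusyException (or a retries-exhausted wrapper around one)
          // while the memstore is over its blocking limit: back off and try again.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}
```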
2024-11-20T22:23:28,888 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:28,888 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/A, priority=13, startTime=1732141408624; duration=0sec 2024-11-20T22:23:28,889 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:28,889 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:A 2024-11-20T22:23:28,889 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:23:28,891 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/50ae2840bdf7465aa01cb0edd1ad3111 is 50, key is test_row_0/B:col10/1732141408375/Put/seqid=0 2024-11-20T22:23:28,893 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:23:28,894 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/C is initiating minor compaction (all files) 2024-11-20T22:23:28,894 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/C in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
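The `ExploringCompactionPolicy` line above ("3 store files, 0 compacting, 3 eligible, 16 blocking") reflects the standard store-file thresholds: a minor compaction is considered once at least `hbase.hstore.compaction.min` files are eligible, and flushes are held back once a store reaches `hbase.hstore.blockingStoreFiles`. A minimal sketch of setting those knobs programmatically; the values shown are the usual defaults, not settings read from this test run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of store files before a minor compaction is considered
    // (the log above selected a compaction as soon as 3 files were eligible).
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on files merged in a single minor compaction.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Once a store holds this many files, flushes are delayed until
    // compaction catches up ("16 blocking" in the log above).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    return conf;
  }
}
```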
2024-11-20T22:23:28,894 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/870458fbd0864dca996687d25b9667cf, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/76551996944a49ba83a86b26521d0c4c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/c7a73ecdd9dc4ff8aaba2aa5b767facb] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=35.2 K 2024-11-20T22:23:28,894 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,895 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T22:23:28,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:28,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:28,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:28,896 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
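The `Unable to complete flush` error above is not a data-loss condition: the `FlushRegionCallable` dispatched for pid=15 found the region already flushing (the "NOT flushing ... as already flushing" line) and reported failure, so the master re-dispatches the subprocedure until a flush can actually be taken, which is exactly what the later pid=15 retries in this log show. From a client, the same table-level flush can be requested through the Admin API; a minimal sketch, assuming the `TestAcidGuarantees` table from this log.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; in the 2.7.0-SNAPSHOT
      // build being tested this appears to map to the FlushTableProcedure /
      // FlushRegionProcedure pair logged above as pid=14 / pid=15.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```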
2024-11-20T22:23:28,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:28,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:28,901 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 870458fbd0864dca996687d25b9667cf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732141406456 2024-11-20T22:23:28,903 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 76551996944a49ba83a86b26521d0c4c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732141406709 2024-11-20T22:23:28,905 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting c7a73ecdd9dc4ff8aaba2aa5b767facb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732141407948 2024-11-20T22:23:28,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741851_1027 (size=12001) 2024-11-20T22:23:28,943 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/50ae2840bdf7465aa01cb0edd1ad3111 2024-11-20T22:23:28,960 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#C#compaction#13 average throughput is 0.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:28,961 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/a30fcf4df94a43ec876973d5b97c82ff is 50, key is test_row_0/C:col10/1732141408300/Put/seqid=0 2024-11-20T22:23:28,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141468970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141468970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,987 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141468973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141468974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:28,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141468981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:28,991 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/dad1988fb8204c228b059ce3865d9028 is 50, key is test_row_0/C:col10/1732141408375/Put/seqid=0 2024-11-20T22:23:29,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741852_1028 (size=12104) 2024-11-20T22:23:29,049 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,050 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T22:23:29,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:29,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:29,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
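The blocking limit reported in every `RegionTooBusyException` above, 512.0 K, is the per-region memstore flush size multiplied by the blocking multiplier: once pending edits exceed it, `HRegion.checkResources` rejects further mutations until the in-flight flush drains the memstore. A minimal sketch of the two settings involved; the 128 KB flush size is an assumed test-sized value chosen so that 128 KB x 4 = 512 KB matches the log, not a value read from the test's configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore size that triggers a flush (assumed test value).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Writes are blocked once the memstore reaches flush.size * multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 0L);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    // 128 KB * 4 = 512 KB, the "Over memstore limit=512.0 K" seen in the log.
    System.out.println("blocking limit = " + (flushSize * multiplier) + " bytes");
  }
}
```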
2024-11-20T22:23:29,051 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:29,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:29,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:29,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741853_1029 (size=12001) 2024-11-20T22:23:29,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/dad1988fb8204c228b059ce3865d9028 2024-11-20T22:23:29,074 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/a30fcf4df94a43ec876973d5b97c82ff as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/a30fcf4df94a43ec876973d5b97c82ff 2024-11-20T22:23:29,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/c814267dee164e148335994d2b891a2f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/c814267dee164e148335994d2b891a2f 2024-11-20T22:23:29,088 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/c814267dee164e148335994d2b891a2f, entries=150, sequenceid=75, filesize=11.7 K 2024-11-20T22:23:29,091 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): 
Completed compaction of 3 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/C of 36a256a4871d36dc6632cf0cdb971cbb into a30fcf4df94a43ec876973d5b97c82ff(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:23:29,091 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:29,091 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/C, priority=13, startTime=1732141408639; duration=0sec 2024-11-20T22:23:29,091 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:29,091 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:C 2024-11-20T22:23:29,093 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/50ae2840bdf7465aa01cb0edd1ad3111 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/50ae2840bdf7465aa01cb0edd1ad3111 2024-11-20T22:23:29,108 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/50ae2840bdf7465aa01cb0edd1ad3111, entries=150, sequenceid=75, filesize=11.7 K 2024-11-20T22:23:29,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/dad1988fb8204c228b059ce3865d9028 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/dad1988fb8204c228b059ce3865d9028 2024-11-20T22:23:29,124 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/dad1988fb8204c228b059ce3865d9028, entries=150, sequenceid=75, filesize=11.7 K 2024-11-20T22:23:29,126 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 36a256a4871d36dc6632cf0cdb971cbb in 402ms, sequenceid=75, compaction requested=false 2024-11-20T22:23:29,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:29,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T22:23:29,208 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,211 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T22:23:29,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:29,212 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T22:23:29,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:29,216 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:29,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:29,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:29,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:29,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:29,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:29,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:29,221 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/97f37a19e7834d40bd73ef845c3c23de as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/97f37a19e7834d40bd73ef845c3c23de 2024-11-20T22:23:29,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/515239f0d0474cf5b6abc0eddd4474f2 is 50, key is test_row_0/A:col10/1732141408786/Put/seqid=0 2024-11-20T22:23:29,241 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/B of 36a256a4871d36dc6632cf0cdb971cbb into 97f37a19e7834d40bd73ef845c3c23de(size=11.8 K), total size for store is 
23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:23:29,242 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:29,242 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/B, priority=13, startTime=1732141408639; duration=0sec 2024-11-20T22:23:29,242 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:29,243 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:B 2024-11-20T22:23:29,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741854_1030 (size=12001) 2024-11-20T22:23:29,315 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/515239f0d0474cf5b6abc0eddd4474f2 2024-11-20T22:23:29,315 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:29,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141469296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:29,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141469296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:29,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141469300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:29,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141469304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:29,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141469314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/1b1ebd3069f5481496f4ae94e27493d8 is 50, key is test_row_0/B:col10/1732141408786/Put/seqid=0 2024-11-20T22:23:29,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741855_1031 (size=12001) 2024-11-20T22:23:29,404 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/1b1ebd3069f5481496f4ae94e27493d8 2024-11-20T22:23:29,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:29,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141469418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,431 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:29,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141469419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:29,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141469431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,434 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:29,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141469431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:29,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141469432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/844515dd2e2746ffb8d623dfa88cca3b is 50, key is test_row_0/C:col10/1732141408786/Put/seqid=0 2024-11-20T22:23:29,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741856_1032 (size=12001) 2024-11-20T22:23:29,499 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/844515dd2e2746ffb8d623dfa88cca3b 2024-11-20T22:23:29,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/515239f0d0474cf5b6abc0eddd4474f2 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/515239f0d0474cf5b6abc0eddd4474f2 2024-11-20T22:23:29,528 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/515239f0d0474cf5b6abc0eddd4474f2, entries=150, sequenceid=93, filesize=11.7 K 2024-11-20T22:23:29,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/1b1ebd3069f5481496f4ae94e27493d8 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/1b1ebd3069f5481496f4ae94e27493d8 2024-11-20T22:23:29,548 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/1b1ebd3069f5481496f4ae94e27493d8, entries=150, sequenceid=93, filesize=11.7 K 2024-11-20T22:23:29,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/844515dd2e2746ffb8d623dfa88cca3b as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/844515dd2e2746ffb8d623dfa88cca3b 2024-11-20T22:23:29,584 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/844515dd2e2746ffb8d623dfa88cca3b, entries=150, sequenceid=93, filesize=11.7 K 2024-11-20T22:23:29,599 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 36a256a4871d36dc6632cf0cdb971cbb in 387ms, sequenceid=93, compaction requested=true 2024-11-20T22:23:29,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:29,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:29,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-20T22:23:29,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-20T22:23:29,605 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-20T22:23:29,605 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0270 sec 2024-11-20T22:23:29,611 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.0460 sec 2024-11-20T22:23:29,612 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T22:23:29,612 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-20T22:23:29,615 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-20T22:23:29,615 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-20T22:23:29,616 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T22:23:29,617 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-20T22:23:29,617 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-20T22:23:29,617 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-20T22:23:29,620 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T22:23:29,620 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-20T22:23:29,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:29,639 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T22:23:29,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:29,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:29,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:29,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:29,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:29,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:29,653 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/30cd97e34684495c8144f68d5d79129c is 50, key is test_row_0/A:col10/1732141409636/Put/seqid=0 2024-11-20T22:23:29,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T22:23:29,680 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-20T22:23:29,683 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:23:29,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-20T22:23:29,686 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:23:29,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T22:23:29,688 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:23:29,688 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:23:29,692 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:29,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141469675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741857_1033 (size=16681) 2024-11-20T22:23:29,701 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/30cd97e34684495c8144f68d5d79129c 2024-11-20T22:23:29,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:29,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141469689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:29,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141469690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:29,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:29,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141469697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141469700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/f95bf22089e649d7a5b59e04ee228bc4 is 50, key is test_row_0/B:col10/1732141409636/Put/seqid=0 2024-11-20T22:23:29,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741858_1034 (size=12001) 2024-11-20T22:23:29,770 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/f95bf22089e649d7a5b59e04ee228bc4 2024-11-20T22:23:29,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T22:23:29,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:29,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141469797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:29,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141469806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,814 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/9b50f8cfa24b44fdbd37f2ce44d19761 is 50, key is test_row_0/C:col10/1732141409636/Put/seqid=0 2024-11-20T22:23:29,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:29,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141469810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:29,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:29,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141469816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141469817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,845 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:29,845 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T22:23:29,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:29,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:29,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:29,846 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:29,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:29,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:29,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741859_1035 (size=12001) 2024-11-20T22:23:29,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T22:23:30,001 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:30,005 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T22:23:30,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:30,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:30,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:30,005 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:30,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:30,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:30,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:30,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141470009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:30,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:30,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:30,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141470021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:30,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:30,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141470022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:30,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141470020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:30,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:30,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141470029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:30,159 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:30,161 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T22:23:30,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:30,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:30,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:30,162 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:30,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:30,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:30,272 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/9b50f8cfa24b44fdbd37f2ce44d19761 2024-11-20T22:23:30,288 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/30cd97e34684495c8144f68d5d79129c as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/30cd97e34684495c8144f68d5d79129c 2024-11-20T22:23:30,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T22:23:30,305 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/30cd97e34684495c8144f68d5d79129c, entries=250, sequenceid=116, filesize=16.3 K 2024-11-20T22:23:30,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/f95bf22089e649d7a5b59e04ee228bc4 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/f95bf22089e649d7a5b59e04ee228bc4 2024-11-20T22:23:30,318 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
6365a1e51efd,44631,1732141399950 2024-11-20T22:23:30,319 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T22:23:30,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:30,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:30,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:30,320 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/f95bf22089e649d7a5b59e04ee228bc4, entries=150, sequenceid=116, filesize=11.7 K 2024-11-20T22:23:30,320 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:30,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:30,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:30,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/9b50f8cfa24b44fdbd37f2ce44d19761 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/9b50f8cfa24b44fdbd37f2ce44d19761 2024-11-20T22:23:30,335 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:30,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141470327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:30,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:30,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141470340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:30,346 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:30,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141470340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:30,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:30,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141470340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:30,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:30,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141470340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:30,354 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/9b50f8cfa24b44fdbd37f2ce44d19761, entries=150, sequenceid=116, filesize=11.7 K 2024-11-20T22:23:30,357 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 36a256a4871d36dc6632cf0cdb971cbb in 717ms, sequenceid=116, compaction requested=true 2024-11-20T22:23:30,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:30,357 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:23:30,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:23:30,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:30,360 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:23:30,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:23:30,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:30,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:23:30,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:30,367 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 
files of size 52787 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:23:30,367 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/A is initiating minor compaction (all files) 2024-11-20T22:23:30,367 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/A in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:30,368 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/8ff1cec0656141a2a0b2e1408096ec43, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/c814267dee164e148335994d2b891a2f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/515239f0d0474cf5b6abc0eddd4474f2, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/30cd97e34684495c8144f68d5d79129c] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=51.5 K 2024-11-20T22:23:30,368 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:23:30,369 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/B is initiating minor compaction (all files) 2024-11-20T22:23:30,369 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/B in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:30,369 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/97f37a19e7834d40bd73ef845c3c23de, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/50ae2840bdf7465aa01cb0edd1ad3111, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/1b1ebd3069f5481496f4ae94e27493d8, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/f95bf22089e649d7a5b59e04ee228bc4] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=47.0 K 2024-11-20T22:23:30,370 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ff1cec0656141a2a0b2e1408096ec43, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732141407948 2024-11-20T22:23:30,372 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 97f37a19e7834d40bd73ef845c3c23de, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732141407948 2024-11-20T22:23:30,373 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting c814267dee164e148335994d2b891a2f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732141408358 2024-11-20T22:23:30,375 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 50ae2840bdf7465aa01cb0edd1ad3111, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732141408358 2024-11-20T22:23:30,377 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 515239f0d0474cf5b6abc0eddd4474f2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732141408786 2024-11-20T22:23:30,378 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30cd97e34684495c8144f68d5d79129c, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732141409292 2024-11-20T22:23:30,379 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b1ebd3069f5481496f4ae94e27493d8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732141408786 2024-11-20T22:23:30,386 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting f95bf22089e649d7a5b59e04ee228bc4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732141409300 2024-11-20T22:23:30,438 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#B#compaction#21 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:30,439 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/89abe8a0bc54401faf0126f2f5ec8c43 is 50, key is test_row_0/B:col10/1732141409636/Put/seqid=0 2024-11-20T22:23:30,447 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#A#compaction#22 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:30,448 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/4d4289662fed4e0699621cd323b8c89c is 50, key is test_row_0/A:col10/1732141409636/Put/seqid=0 2024-11-20T22:23:30,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741860_1036 (size=12241) 2024-11-20T22:23:30,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741861_1037 (size=12241) 2024-11-20T22:23:30,509 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:30,513 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T22:23:30,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:30,514 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T22:23:30,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:30,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:30,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:30,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:30,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:30,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:30,528 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/89abe8a0bc54401faf0126f2f5ec8c43 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/89abe8a0bc54401faf0126f2f5ec8c43 2024-11-20T22:23:30,542 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/4d4289662fed4e0699621cd323b8c89c as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/4d4289662fed4e0699621cd323b8c89c 2024-11-20T22:23:30,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/8acc2a831433477bac8151b848bd4a75 is 50, key is test_row_0/A:col10/1732141409683/Put/seqid=0 2024-11-20T22:23:30,551 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/B of 36a256a4871d36dc6632cf0cdb971cbb into 89abe8a0bc54401faf0126f2f5ec8c43(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:23:30,551 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:30,551 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/B, priority=12, startTime=1732141410359; duration=0sec 2024-11-20T22:23:30,553 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:30,554 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:B 2024-11-20T22:23:30,554 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:23:30,558 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:23:30,558 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/C is initiating minor compaction (all files) 2024-11-20T22:23:30,558 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/C in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:30,559 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/a30fcf4df94a43ec876973d5b97c82ff, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/dad1988fb8204c228b059ce3865d9028, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/844515dd2e2746ffb8d623dfa88cca3b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/9b50f8cfa24b44fdbd37f2ce44d19761] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=47.0 K 2024-11-20T22:23:30,560 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting a30fcf4df94a43ec876973d5b97c82ff, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732141407948 2024-11-20T22:23:30,561 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/A of 36a256a4871d36dc6632cf0cdb971cbb into 4d4289662fed4e0699621cd323b8c89c(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:23:30,561 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:30,561 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/A, priority=12, startTime=1732141410357; duration=0sec 2024-11-20T22:23:30,561 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:30,561 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting dad1988fb8204c228b059ce3865d9028, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732141408358 2024-11-20T22:23:30,561 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:A 2024-11-20T22:23:30,562 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 844515dd2e2746ffb8d623dfa88cca3b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732141408786 2024-11-20T22:23:30,565 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b50f8cfa24b44fdbd37f2ce44d19761, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732141409300 2024-11-20T22:23:30,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741862_1038 (size=12101) 2024-11-20T22:23:30,611 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#C#compaction#24 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:30,612 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/7ed7bb30ced048d6867478e260e0d35c is 50, key is test_row_0/C:col10/1732141409636/Put/seqid=0 2024-11-20T22:23:30,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741863_1039 (size=12241) 2024-11-20T22:23:30,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T22:23:30,850 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:30,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:30,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:30,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141470915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:30,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:30,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141470919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:30,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:30,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141470920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:30,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:30,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:30,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141470923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:30,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141470923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:31,003 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/8acc2a831433477bac8151b848bd4a75 2024-11-20T22:23:31,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:31,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141471027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:31,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/3c3fb9b57c19490a86431879ca842d15 is 50, key is test_row_0/B:col10/1732141409683/Put/seqid=0 2024-11-20T22:23:31,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:31,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141471040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:31,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:31,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141471040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:31,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:31,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141471049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:31,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741864_1040 (size=12101) 2024-11-20T22:23:31,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:31,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141471040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:31,087 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/7ed7bb30ced048d6867478e260e0d35c as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/7ed7bb30ced048d6867478e260e0d35c 2024-11-20T22:23:31,115 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/C of 36a256a4871d36dc6632cf0cdb971cbb into 7ed7bb30ced048d6867478e260e0d35c(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:23:31,115 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:31,116 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/C, priority=12, startTime=1732141410360; duration=0sec 2024-11-20T22:23:31,116 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:31,116 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:C 2024-11-20T22:23:31,241 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:31,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141471240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:31,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:31,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:31,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141471254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:31,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141471254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:31,259 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:31,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141471253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:31,272 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:31,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141471267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:31,457 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/3c3fb9b57c19490a86431879ca842d15 2024-11-20T22:23:31,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/f9805001a90b427b808d2e1aca9ece95 is 50, key is test_row_0/C:col10/1732141409683/Put/seqid=0 2024-11-20T22:23:31,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741865_1041 (size=12101) 2024-11-20T22:23:31,532 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/f9805001a90b427b808d2e1aca9ece95 2024-11-20T22:23:31,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/8acc2a831433477bac8151b848bd4a75 as 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/8acc2a831433477bac8151b848bd4a75 2024-11-20T22:23:31,550 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:31,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141471544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:31,558 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/8acc2a831433477bac8151b848bd4a75, entries=150, sequenceid=131, filesize=11.8 K 2024-11-20T22:23:31,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/3c3fb9b57c19490a86431879ca842d15 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/3c3fb9b57c19490a86431879ca842d15 2024-11-20T22:23:31,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:31,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141471561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:31,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:31,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141471564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:31,570 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:31,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141471562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:31,577 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/3c3fb9b57c19490a86431879ca842d15, entries=150, sequenceid=131, filesize=11.8 K 2024-11-20T22:23:31,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/f9805001a90b427b808d2e1aca9ece95 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/f9805001a90b427b808d2e1aca9ece95 2024-11-20T22:23:31,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:31,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141471576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:31,613 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/f9805001a90b427b808d2e1aca9ece95, entries=150, sequenceid=131, filesize=11.8 K
2024-11-20T22:23:31,615 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 36a256a4871d36dc6632cf0cdb971cbb in 1101ms, sequenceid=131, compaction requested=false
2024-11-20T22:23:31,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb:
2024-11-20T22:23:31,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.
2024-11-20T22:23:31,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17
2024-11-20T22:23:31,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=17
2024-11-20T22:23:31,622 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16
2024-11-20T22:23:31,623 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9300 sec
2024-11-20T22:23:31,629 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.9410 sec
2024-11-20T22:23:31,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16
2024-11-20T22:23:31,795 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed
2024-11-20T22:23:31,803 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-20T22:23:31,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees
2024-11-20T22:23:31,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18
2024-11-20T22:23:31,808 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-20T22:23:31,809 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T22:23:31,810 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T22:23:31,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18
2024-11-20T22:23:31,965 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950
2024-11-20T22:23:31,966 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19
2024-11-20T22:23:31,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.
2024-11-20T22:23:31,967 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB
2024-11-20T22:23:31,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A
2024-11-20T22:23:31,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:23:31,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B
2024-11-20T22:23:31,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:23:31,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C
2024-11-20T22:23:31,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:23:31,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/d4023fe99c684cfbab1f61bc59a967d9 is 50, key is test_row_0/A:col10/1732141410920/Put/seqid=0
2024-11-20T22:23:32,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741866_1042 (size=12151)
2024-11-20T22:23:32,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing
2024-11-20T22:23:32,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb
2024-11-20T22:23:32,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18
2024-11-20T22:23:32,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141472113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,130 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141472125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141472125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,136 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141472126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141472137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,249 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141472233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141472236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141472236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141472244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141472246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T22:23:32,451 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/d4023fe99c684cfbab1f61bc59a967d9 2024-11-20T22:23:32,460 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141472455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,465 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141472456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141472456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,482 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141472462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:32,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141472469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:32,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/6c4d5503b0924178936dcbebe05aa468 is 50, key is test_row_0/B:col10/1732141410920/Put/seqid=0
2024-11-20T22:23:32,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741867_1043 (size=12151)
2024-11-20T22:23:32,570 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/6c4d5503b0924178936dcbebe05aa468
2024-11-20T22:23:32,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/c4aaaa1b7a934cc889ddaf447e500fdf is 50, key is test_row_0/C:col10/1732141410920/Put/seqid=0
2024-11-20T22:23:32,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741868_1044 (size=12151)
2024-11-20T22:23:32,642 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/c4aaaa1b7a934cc889ddaf447e500fdf
2024-11-20T22:23:32,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/d4023fe99c684cfbab1f61bc59a967d9 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d4023fe99c684cfbab1f61bc59a967d9
2024-11-20T22:23:32,664 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d4023fe99c684cfbab1f61bc59a967d9, entries=150, sequenceid=156, filesize=11.9 K
2024-11-20T22:23:32,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/6c4d5503b0924178936dcbebe05aa468 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/6c4d5503b0924178936dcbebe05aa468
2024-11-20T22:23:32,679 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/6c4d5503b0924178936dcbebe05aa468, entries=150, sequenceid=156, filesize=11.9 K
2024-11-20T22:23:32,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/c4aaaa1b7a934cc889ddaf447e500fdf as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/c4aaaa1b7a934cc889ddaf447e500fdf
2024-11-20T22:23:32,694 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/c4aaaa1b7a934cc889ddaf447e500fdf, entries=150, sequenceid=156, filesize=11.9 K
2024-11-20T22:23:32,700 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 36a256a4871d36dc6632cf0cdb971cbb in 734ms, sequenceid=156, compaction requested=true
2024-11-20T22:23:32,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb:
2024-11-20T22:23:32,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.
2024-11-20T22:23:32,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19
2024-11-20T22:23:32,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=19
2024-11-20T22:23:32,706 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18
2024-11-20T22:23:32,707 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 894 msec
2024-11-20T22:23:32,712 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 905 msec
2024-11-20T22:23:32,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb
2024-11-20T22:23:32,772 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB
2024-11-20T22:23:32,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A
2024-11-20T22:23:32,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:23:32,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B
2024-11-20T22:23:32,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:23:32,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C
2024-11-20T22:23:32,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:23:32,782 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/093475e0021d4db29c9cbc4e32817ca0 is 50, key is test_row_0/A:col10/1732141412123/Put/seqid=0
2024-11-20T22:23:32,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741869_1045 (size=14541)
2024-11-20T22:23:32,810 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/093475e0021d4db29c9cbc4e32817ca0
2024-11-20T22:23:32,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141472816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,832 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141472825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,837 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/0df53cfc286f44ddaca12d8f9831831b is 50, key is test_row_0/B:col10/1732141412123/Put/seqid=0 2024-11-20T22:23:32,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141472828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,840 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141472831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141472832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741870_1046 (size=12151) 2024-11-20T22:23:32,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T22:23:32,917 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-20T22:23:32,920 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:23:32,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-20T22:23:32,924 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:23:32,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T22:23:32,925 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:23:32,926 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:23:32,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141472935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141472933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141472942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141472947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:32,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:32,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141472948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T22:23:33,081 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,081 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:33,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:33,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:33,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:33,082 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
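The procedure entries above record the test client asking the master to flush TestAcidGuarantees (FlushTableProcedure pid=18 completing, pid=20 being stored), with the subordinate FlushRegionCallable failing and being retried because the region is already flushing. As a hedged illustration of the client-side call behind a "Client=... flush TestAcidGuarantees" request, and not a reproduction of the test's own code, a table flush can be requested through the standard Admin API roughly as follows; the connection setup is an assumption of the sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; the master drives
          // this as a flush procedure and the call returns when it completes.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }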
2024-11-20T22:23:33,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:33,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:33,145 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:33,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141473142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:33,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141473144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:33,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141473152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:33,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141473157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:33,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141473161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T22:23:33,236 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,237 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:33,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:33,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:33,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:33,237 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
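The RegionTooBusyException entries above come from HRegion.checkResources rejecting Mutate calls while the region's memstore is over its 512.0 K blocking limit. Purely as a hedged sketch of how a caller might cope with that kind of rejection (the stock HBase client also retries internally and can surface the error wrapped in a retries-exhausted exception), a bounded retry loop around Table.put could look like the following; the table name, column layout and backoff values are invented for the example:

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      // Retries a single put a few times when the region reports it is too busy.
      // Note: the standard client may deliver the server-side RegionTooBusyException
      // wrapped in another IOException, in which case the cause would need inspecting.
      static void putWithRetry(Connection conn, Put put) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          long backoffMs = 100;
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              return;
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) {
                throw e; // give up after a handful of attempts
              }
              Thread.sleep(backoffMs);
              backoffMs *= 2; // simple exponential backoff between attempts
            }
          }
        }
      }

      static Put examplePut() {
        // Mirrors the row/column shape seen in the log (test_row_0, families A/B/C, col10).
        Put put = new Put(Bytes.toBytes("test_row_0"));
        put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
        return put;
      }
    }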
2024-11-20T22:23:33,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:33,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:33,280 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/0df53cfc286f44ddaca12d8f9831831b 2024-11-20T22:23:33,320 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/7c9e17a1ae32454da5818bdad576a6ee is 50, key is test_row_0/C:col10/1732141412123/Put/seqid=0 2024-11-20T22:23:33,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741871_1047 (size=12151) 2024-11-20T22:23:33,363 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/7c9e17a1ae32454da5818bdad576a6ee 2024-11-20T22:23:33,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/093475e0021d4db29c9cbc4e32817ca0 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/093475e0021d4db29c9cbc4e32817ca0 2024-11-20T22:23:33,392 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,393 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:33,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:33,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:33,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:33,394 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:33,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:33,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:33,404 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/093475e0021d4db29c9cbc4e32817ca0, entries=200, sequenceid=173, filesize=14.2 K 2024-11-20T22:23:33,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/0df53cfc286f44ddaca12d8f9831831b as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0df53cfc286f44ddaca12d8f9831831b 2024-11-20T22:23:33,421 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0df53cfc286f44ddaca12d8f9831831b, entries=150, sequenceid=173, filesize=11.9 K 2024-11-20T22:23:33,424 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/7c9e17a1ae32454da5818bdad576a6ee as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/7c9e17a1ae32454da5818bdad576a6ee 2024-11-20T22:23:33,443 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/7c9e17a1ae32454da5818bdad576a6ee, entries=150, sequenceid=173, filesize=11.9 K 
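The "Over memstore limit=512.0 K" figure quoted in the exceptions is the per-region blocking threshold checked in HRegion.checkResources, which in general is the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; this run clearly uses much smaller values than the production defaults, and the exact settings are not visible in this excerpt. As an illustration only, with example numbers rather than the test's actual ones, the relevant knobs would be set on a Configuration (or in hbase-site.xml) like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches 128 KB (example value)...
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // ...and block new updates with RegionTooBusyException once it reaches
        // flush.size * multiplier, i.e. 512 KB with these example numbers.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking limit = " + blocking + " bytes");
      }
    }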
2024-11-20T22:23:33,448 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 36a256a4871d36dc6632cf0cdb971cbb in 676ms, sequenceid=173, compaction requested=true 2024-11-20T22:23:33,448 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:33,449 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:23:33,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:23:33,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:33,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:23:33,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:33,450 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:23:33,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:23:33,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:33,456 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51034 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:23:33,456 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/A is initiating minor compaction (all files) 2024-11-20T22:23:33,457 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/A in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:33,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:33,457 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/4d4289662fed4e0699621cd323b8c89c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/8acc2a831433477bac8151b848bd4a75, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d4023fe99c684cfbab1f61bc59a967d9, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/093475e0021d4db29c9cbc4e32817ca0] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=49.8 K 2024-11-20T22:23:33,457 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T22:23:33,459 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48644 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:23:33,459 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/B is initiating minor compaction (all files) 2024-11-20T22:23:33,459 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/B in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:33,460 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/89abe8a0bc54401faf0126f2f5ec8c43, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/3c3fb9b57c19490a86431879ca842d15, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/6c4d5503b0924178936dcbebe05aa468, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0df53cfc286f44ddaca12d8f9831831b] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=47.5 K 2024-11-20T22:23:33,460 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d4289662fed4e0699621cd323b8c89c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732141409300 2024-11-20T22:23:33,461 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 89abe8a0bc54401faf0126f2f5ec8c43, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732141409300 2024-11-20T22:23:33,462 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8acc2a831433477bac8151b848bd4a75, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732141409678 2024-11-20T22:23:33,463 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c3fb9b57c19490a86431879ca842d15, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732141409678 2024-11-20T22:23:33,464 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:33,464 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting d4023fe99c684cfbab1f61bc59a967d9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732141410897 2024-11-20T22:23:33,464 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:33,464 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:33,464 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:33,464 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:33,464 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:33,466 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c4d5503b0924178936dcbebe05aa468, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732141410897 2024-11-20T22:23:33,466 DEBUG 
[RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 093475e0021d4db29c9cbc4e32817ca0, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732141412113 2024-11-20T22:23:33,468 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 0df53cfc286f44ddaca12d8f9831831b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732141412123 2024-11-20T22:23:33,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:33,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141473491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:33,503 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/d5136c9a71274c80986463c6622437ad is 50, key is test_row_0/A:col10/1732141413455/Put/seqid=0 2024-11-20T22:23:33,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141473493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:33,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141473495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:33,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141473498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:33,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141473503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T22:23:33,534 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#A#compaction#34 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:33,535 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/b2a8eb21fc3149cd83eddbc48d1f9d56 is 50, key is test_row_0/A:col10/1732141412123/Put/seqid=0 2024-11-20T22:23:33,549 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,552 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:33,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:33,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:33,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:33,553 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:33,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:33,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:33,554 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#B#compaction#35 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:33,555 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/5b3367f850704c33aee6f24b5124ef5d is 50, key is test_row_0/B:col10/1732141412123/Put/seqid=0 2024-11-20T22:23:33,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741872_1048 (size=14541) 2024-11-20T22:23:33,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:33,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141473605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:33,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141473607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,615 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:33,614 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:33,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141473607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141473607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:33,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141473613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741873_1049 (size=12527) 2024-11-20T22:23:33,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741874_1050 (size=12527) 2024-11-20T22:23:33,672 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/5b3367f850704c33aee6f24b5124ef5d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/5b3367f850704c33aee6f24b5124ef5d 2024-11-20T22:23:33,685 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/B of 36a256a4871d36dc6632cf0cdb971cbb into 5b3367f850704c33aee6f24b5124ef5d(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:23:33,685 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:33,688 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/B, priority=12, startTime=1732141413449; duration=0sec 2024-11-20T22:23:33,688 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:33,688 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:B 2024-11-20T22:23:33,689 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:23:33,693 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48644 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:23:33,695 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/C is initiating minor compaction (all files) 2024-11-20T22:23:33,695 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/C in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:33,695 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/7ed7bb30ced048d6867478e260e0d35c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/f9805001a90b427b808d2e1aca9ece95, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/c4aaaa1b7a934cc889ddaf447e500fdf, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/7c9e17a1ae32454da5818bdad576a6ee] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=47.5 K 2024-11-20T22:23:33,696 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ed7bb30ced048d6867478e260e0d35c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732141409300 2024-11-20T22:23:33,697 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting f9805001a90b427b808d2e1aca9ece95, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732141409678 2024-11-20T22:23:33,698 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting c4aaaa1b7a934cc889ddaf447e500fdf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=156, earliestPutTs=1732141410897 2024-11-20T22:23:33,699 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c9e17a1ae32454da5818bdad576a6ee, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732141412123 2024-11-20T22:23:33,711 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,713 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:33,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:33,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:33,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:33,714 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:33,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:33,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:33,731 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#C#compaction#36 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:33,732 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/1f2e4fa308f141dfbd49555b6b6542be is 50, key is test_row_0/C:col10/1732141412123/Put/seqid=0 2024-11-20T22:23:33,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741875_1051 (size=12527) 2024-11-20T22:23:33,821 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:33,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141473816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,822 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:33,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141473817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:33,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141473818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:33,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141473818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:33,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141473822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,867 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:33,868 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:33,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:33,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:33,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:33,869 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:23:33,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:33,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:33,990 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/d5136c9a71274c80986463c6622437ad 2024-11-20T22:23:34,021 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/e9bc96329ae646078f0bb84a5e921625 is 50, key is test_row_0/B:col10/1732141413455/Put/seqid=0 2024-11-20T22:23:34,025 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:34,026 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:34,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:34,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:34,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:34,027 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:23:34,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:34,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:34,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T22:23:34,054 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/b2a8eb21fc3149cd83eddbc48d1f9d56 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/b2a8eb21fc3149cd83eddbc48d1f9d56 2024-11-20T22:23:34,076 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/A of 36a256a4871d36dc6632cf0cdb971cbb into b2a8eb21fc3149cd83eddbc48d1f9d56(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:23:34,076 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:34,076 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/A, priority=12, startTime=1732141413449; duration=0sec 2024-11-20T22:23:34,076 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:34,076 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:A 2024-11-20T22:23:34,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741876_1052 (size=12151) 2024-11-20T22:23:34,080 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/e9bc96329ae646078f0bb84a5e921625 2024-11-20T22:23:34,113 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/8b1c7c7e6c104f5a96dbedb318145631 is 50, key is test_row_0/C:col10/1732141413455/Put/seqid=0 2024-11-20T22:23:34,136 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:34,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141474132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:34,139 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:34,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141474132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:34,145 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:34,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141474140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:34,138 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:34,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141474132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:34,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:34,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141474143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:34,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741877_1053 (size=12151) 2024-11-20T22:23:34,167 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/8b1c7c7e6c104f5a96dbedb318145631 2024-11-20T22:23:34,187 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:34,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/d5136c9a71274c80986463c6622437ad as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d5136c9a71274c80986463c6622437ad 2024-11-20T22:23:34,188 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:34,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:34,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:34,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:34,188 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:34,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:34,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:34,202 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/1f2e4fa308f141dfbd49555b6b6542be as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/1f2e4fa308f141dfbd49555b6b6542be 2024-11-20T22:23:34,213 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d5136c9a71274c80986463c6622437ad, entries=200, sequenceid=195, filesize=14.2 K 2024-11-20T22:23:34,215 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/e9bc96329ae646078f0bb84a5e921625 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/e9bc96329ae646078f0bb84a5e921625 2024-11-20T22:23:34,222 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/C of 36a256a4871d36dc6632cf0cdb971cbb into 1f2e4fa308f141dfbd49555b6b6542be(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:23:34,222 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:34,222 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/C, priority=12, startTime=1732141413450; duration=0sec 2024-11-20T22:23:34,223 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:34,223 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:C 2024-11-20T22:23:34,229 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/e9bc96329ae646078f0bb84a5e921625, entries=150, sequenceid=195, filesize=11.9 K 2024-11-20T22:23:34,231 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/8b1c7c7e6c104f5a96dbedb318145631 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/8b1c7c7e6c104f5a96dbedb318145631 2024-11-20T22:23:34,246 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/8b1c7c7e6c104f5a96dbedb318145631, entries=150, sequenceid=195, filesize=11.9 K 2024-11-20T22:23:34,252 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 36a256a4871d36dc6632cf0cdb971cbb in 795ms, sequenceid=195, compaction requested=false 2024-11-20T22:23:34,253 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:34,342 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:34,343 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:34,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:34,343 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T22:23:34,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:34,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:34,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:34,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:34,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:34,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:34,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/e431f5fe61ce42c38a1e211c5421b14b is 50, key is test_row_0/A:col10/1732141413491/Put/seqid=0 2024-11-20T22:23:34,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741878_1054 (size=12151) 2024-11-20T22:23:34,399 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/e431f5fe61ce42c38a1e211c5421b14b 2024-11-20T22:23:34,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/2b67b77f3f464f0493e6852a690a76cb is 50, key is test_row_0/B:col10/1732141413491/Put/seqid=0 2024-11-20T22:23:34,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741879_1055 (size=12151) 2024-11-20T22:23:34,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
as already flushing 2024-11-20T22:23:34,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:34,691 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:34,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141474682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:34,692 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:34,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141474684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:34,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:34,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141474687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:34,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:34,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141474691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:34,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:34,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141474692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:34,798 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:34,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141474793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:34,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:34,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141474794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:34,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:34,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141474796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:34,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:34,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141474796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:34,803 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:34,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141474801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:34,850 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/2b67b77f3f464f0493e6852a690a76cb 2024-11-20T22:23:34,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/f08061d5faf44c8da0ce2478a38da3e4 is 50, key is test_row_0/C:col10/1732141413491/Put/seqid=0 2024-11-20T22:23:34,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741880_1056 (size=12151) 2024-11-20T22:23:34,894 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/f08061d5faf44c8da0ce2478a38da3e4 2024-11-20T22:23:34,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/e431f5fe61ce42c38a1e211c5421b14b as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/e431f5fe61ce42c38a1e211c5421b14b 2024-11-20T22:23:34,916 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/e431f5fe61ce42c38a1e211c5421b14b, entries=150, sequenceid=213, filesize=11.9 K 2024-11-20T22:23:34,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/2b67b77f3f464f0493e6852a690a76cb as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/2b67b77f3f464f0493e6852a690a76cb 2024-11-20T22:23:34,928 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/2b67b77f3f464f0493e6852a690a76cb, entries=150, sequenceid=213, filesize=11.9 K 2024-11-20T22:23:34,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/f08061d5faf44c8da0ce2478a38da3e4 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/f08061d5faf44c8da0ce2478a38da3e4 2024-11-20T22:23:34,940 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/f08061d5faf44c8da0ce2478a38da3e4, entries=150, sequenceid=213, filesize=11.9 K 2024-11-20T22:23:34,941 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 36a256a4871d36dc6632cf0cdb971cbb in 598ms, sequenceid=213, compaction requested=true 2024-11-20T22:23:34,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:34,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:34,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-20T22:23:34,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-20T22:23:34,945 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-20T22:23:34,945 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0170 sec 2024-11-20T22:23:34,949 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 2.0270 sec 2024-11-20T22:23:35,005 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T22:23:35,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:35,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:35,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:35,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:35,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:35,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:35,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:35,013 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/85d4603566544a9891ac7e6893da89fe is 50, key is test_row_0/A:col10/1732141414689/Put/seqid=0 2024-11-20T22:23:35,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T22:23:35,031 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-20T22:23:35,034 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:23:35,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-20T22:23:35,037 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
2024-11-20T22:23:35,038 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:23:35,038 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:23:35,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T22:23:35,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:35,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141475028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:35,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141475027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:35,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141475030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:35,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141475031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:35,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141475031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741881_1057 (size=14541) 2024-11-20T22:23:35,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/85d4603566544a9891ac7e6893da89fe 2024-11-20T22:23:35,080 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/c703176e9ad34859b83180aad4bf4116 is 50, key is test_row_0/B:col10/1732141414689/Put/seqid=0 2024-11-20T22:23:35,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741882_1058 (size=12151) 2024-11-20T22:23:35,130 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/c703176e9ad34859b83180aad4bf4116 2024-11-20T22:23:35,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T22:23:35,155 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:35,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141475148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:35,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141475148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:35,158 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:35,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141475149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,158 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/46a2839fcd0241bfb11b23b3d0269d86 is 50, key is test_row_0/C:col10/1732141414689/Put/seqid=0 2024-11-20T22:23:35,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141475148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,158 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:35,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141475149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741883_1059 (size=12151) 2024-11-20T22:23:35,196 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,196 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T22:23:35,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:35,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:35,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:35,197 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:23:35,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:35,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:35,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T22:23:35,352 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,353 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T22:23:35,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:35,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:35,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:35,354 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:35,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:35,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:35,367 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:35,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141475361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:35,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:35,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141475361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141475361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:35,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141475364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,369 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:35,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141475366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,507 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T22:23:35,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:35,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:35,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:35,509 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:35,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:23:35,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:23:35,595 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/46a2839fcd0241bfb11b23b3d0269d86 2024-11-20T22:23:35,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/85d4603566544a9891ac7e6893da89fe as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/85d4603566544a9891ac7e6893da89fe 2024-11-20T22:23:35,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T22:23:35,663 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,665 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/85d4603566544a9891ac7e6893da89fe, entries=200, sequenceid=236, filesize=14.2 K 2024-11-20T22:23:35,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T22:23:35,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:35,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:35,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:35,667 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:35,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:35,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:35,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/c703176e9ad34859b83180aad4bf4116 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/c703176e9ad34859b83180aad4bf4116 2024-11-20T22:23:35,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:35,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141475670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:35,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141475670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:35,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141475671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:35,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141475672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,678 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:35,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141475673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,685 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/c703176e9ad34859b83180aad4bf4116, entries=150, sequenceid=236, filesize=11.9 K 2024-11-20T22:23:35,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/46a2839fcd0241bfb11b23b3d0269d86 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/46a2839fcd0241bfb11b23b3d0269d86 2024-11-20T22:23:35,697 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/46a2839fcd0241bfb11b23b3d0269d86, entries=150, sequenceid=236, filesize=11.9 K 2024-11-20T22:23:35,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 36a256a4871d36dc6632cf0cdb971cbb in 694ms, sequenceid=236, compaction requested=true 2024-11-20T22:23:35,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:35,699 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:23:35,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:23:35,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:35,700 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:23:35,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:B, priority=-2147483648, current under compaction store size is 2 
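The RegionTooBusyException bursts above are HRegion.checkResources refusing new Mutate calls while the region's memstore is over its blocking limit (512.0 K here); the rejections pause once the flush at 22:23:35,698 drains the memstore, then resume later in the log as writers fill it again. The blocking limit is derived from the memstore flush size and hbase.hregion.memstore.block.multiplier, so a limit this small suggests the test runs with a deliberately tiny flush size. The sketch below only illustrates that arithmetic; the concrete values are assumptions, not settings read from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed test-style tuning: a 128 K flush size with a block multiplier
        // of 4 yields the 512 K blocking limit reported above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking memstore size = " + blocking + " bytes"); // 524288
      }
    }
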
2024-11-20T22:23:35,703 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48980 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:23:35,704 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53760 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:23:35,704 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/B is initiating minor compaction (all files) 2024-11-20T22:23:35,704 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/A is initiating minor compaction (all files) 2024-11-20T22:23:35,704 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/B in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:35,704 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/A in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:35,704 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/5b3367f850704c33aee6f24b5124ef5d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/e9bc96329ae646078f0bb84a5e921625, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/2b67b77f3f464f0493e6852a690a76cb, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/c703176e9ad34859b83180aad4bf4116] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=47.8 K 2024-11-20T22:23:35,704 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/b2a8eb21fc3149cd83eddbc48d1f9d56, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d5136c9a71274c80986463c6622437ad, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/e431f5fe61ce42c38a1e211c5421b14b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/85d4603566544a9891ac7e6893da89fe] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=52.5 K 2024-11-20T22:23:35,705 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): 
Compacting b2a8eb21fc3149cd83eddbc48d1f9d56, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732141412123 2024-11-20T22:23:35,705 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b3367f850704c33aee6f24b5124ef5d, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732141412123 2024-11-20T22:23:35,706 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting d5136c9a71274c80986463c6622437ad, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732141412825 2024-11-20T22:23:35,706 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting e9bc96329ae646078f0bb84a5e921625, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732141412829 2024-11-20T22:23:35,707 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting e431f5fe61ce42c38a1e211c5421b14b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732141413491 2024-11-20T22:23:35,707 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b67b77f3f464f0493e6852a690a76cb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732141413491 2024-11-20T22:23:35,708 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 85d4603566544a9891ac7e6893da89fe, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732141414678 2024-11-20T22:23:35,708 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting c703176e9ad34859b83180aad4bf4116, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732141414678 2024-11-20T22:23:35,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:35,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:23:35,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:35,739 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#B#compaction#45 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:35,740 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/0ec5c58a57c34b8cbcef7594bb829c77 is 50, key is test_row_0/B:col10/1732141414689/Put/seqid=0 2024-11-20T22:23:35,752 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#A#compaction#46 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:35,753 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/8ca75427dc30442cbb082712e9613fd6 is 50, key is test_row_0/A:col10/1732141414689/Put/seqid=0 2024-11-20T22:23:35,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741884_1060 (size=12663) 2024-11-20T22:23:35,823 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:35,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T22:23:35,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:35,825 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T22:23:35,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:35,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:35,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:35,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:35,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:35,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:35,828 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/0ec5c58a57c34b8cbcef7594bb829c77 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0ec5c58a57c34b8cbcef7594bb829c77 2024-11-20T22:23:35,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/fd946ccc83ea493eab28278a567ce389 is 50, key is test_row_0/A:col10/1732141415028/Put/seqid=0 2024-11-20T22:23:35,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741885_1061 (size=12663) 2024-11-20T22:23:35,844 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/B of 36a256a4871d36dc6632cf0cdb971cbb into 0ec5c58a57c34b8cbcef7594bb829c77(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
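The compaction selections above come from ExploringCompactionPolicy: after the fourth flush each store has four HFiles, all four are eligible, and the policy picks the whole set, hence "minor compaction (all files)". The "3 permutations with 3 in ratio" wording refers to the contiguous candidate subsets it scored, keeping those that are "in ratio", roughly meaning no file in the subset is larger than hbase.hstore.compaction.ratio (default 1.2) times the combined size of the others. The check below is a simplified standalone restatement of that rule, not HBase's actual implementation, and the sample sizes are only approximations of the four B-store files listed above.

    import java.util.List;

    public class RatioCheck {
      // Simplified "in ratio" test: a candidate set qualifies if no single file
      // is bigger than ratio x (sum of the other files in the set).
      static boolean inRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          if (size > ratio * (total - size)) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Approximate sizes of the four B-store files selected above
        // (12.2 K + 11.9 K + 11.9 K + 11.9 K, totalSize reported as 47.8 K).
        List<Long> bFiles = List.of(12_200L, 11_900L, 11_900L, 11_900L);
        System.out.println(inRatio(bFiles, 1.2)); // true: the files are all of similar size
      }
    }
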
2024-11-20T22:23:35,844 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:35,844 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/B, priority=12, startTime=1732141415700; duration=0sec 2024-11-20T22:23:35,845 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:35,845 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:B 2024-11-20T22:23:35,845 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:23:35,848 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48980 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:23:35,849 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/C is initiating minor compaction (all files) 2024-11-20T22:23:35,849 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/C in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:35,849 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/1f2e4fa308f141dfbd49555b6b6542be, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/8b1c7c7e6c104f5a96dbedb318145631, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/f08061d5faf44c8da0ce2478a38da3e4, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/46a2839fcd0241bfb11b23b3d0269d86] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=47.8 K 2024-11-20T22:23:35,850 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f2e4fa308f141dfbd49555b6b6542be, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732141412123 2024-11-20T22:23:35,852 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b1c7c7e6c104f5a96dbedb318145631, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732141412829 2024-11-20T22:23:35,852 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting f08061d5faf44c8da0ce2478a38da3e4, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732141413491 2024-11-20T22:23:35,853 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46a2839fcd0241bfb11b23b3d0269d86, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732141414678 2024-11-20T22:23:35,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741886_1062 (size=12151) 2024-11-20T22:23:35,897 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#C#compaction#48 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:35,898 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/30456054936c45679f31180aa87f3f99 is 50, key is test_row_0/C:col10/1732141414689/Put/seqid=0 2024-11-20T22:23:35,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741887_1063 (size=12663) 2024-11-20T22:23:35,957 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/30456054936c45679f31180aa87f3f99 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/30456054936c45679f31180aa87f3f99 2024-11-20T22:23:35,979 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/C of 36a256a4871d36dc6632cf0cdb971cbb into 30456054936c45679f31180aa87f3f99(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
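The PressureAwareThroughputController lines show why these compactions were never throttled: with no other compactions in flight the controller reports no pressure, the write limit sits at 50.00 MB/second, and the ~3.3 MB/second these small files need never triggers a sleep ("slept 0 time(s)"). As a rough mental model only (an assumption about the documented behaviour, not code lifted from HBase), the limit scales between a lower and a higher bound as compaction pressure rises; the property names in the comments are the ones believed to configure those bounds and should be verified against the running version.

    public class CompactionThroughputModel {
      // Rough model (assumption): limit = lower + (higher - lower) * pressure,
      // with pressure clamped to [0, 1]. Assumed bounds of 50 MB/s and 100 MB/s;
      // the corresponding configuration keys are believed to be
      //   hbase.hstore.compaction.throughput.lower.bound
      //   hbase.hstore.compaction.throughput.higher.bound
      static double limitBytesPerSec(double lower, double higher, double pressure) {
        double p = Math.max(0.0, Math.min(1.0, pressure));
        return lower + (higher - lower) * p;
      }

      public static void main(String[] args) {
        double mb = 1024 * 1024;
        // No pressure, as in the log above: the limit is the lower bound (50 MB/s).
        System.out.printf("limit at pressure 0.0 = %.2f MB/s%n",
            limitBytesPerSec(50 * mb, 100 * mb, 0.0) / mb);
      }
    }
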
2024-11-20T22:23:35,979 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:35,979 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/C, priority=12, startTime=1732141415728; duration=0sec 2024-11-20T22:23:35,979 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:35,979 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:C 2024-11-20T22:23:36,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T22:23:36,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:36,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:36,234 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141476227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,237 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141476228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141476232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141476234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141476236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,247 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/8ca75427dc30442cbb082712e9613fd6 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/8ca75427dc30442cbb082712e9613fd6 2024-11-20T22:23:36,259 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/A of 36a256a4871d36dc6632cf0cdb971cbb into 8ca75427dc30442cbb082712e9613fd6(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
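Each rejected Mutate above carries a deadline roughly 60 seconds after the call, presumably the client operation timeout window within which the client keeps retrying. In normal use the HBase client retries RegionTooBusyException internally, governed by settings such as hbase.client.pause and hbase.client.retries.number, so the writers in this test eventually get through once the flush frees the memstore. The loop below is only an illustrative, explicit version of that behaviour; the row, column and backoff values are arbitrary assumptions, with the table and row names borrowed from the log for flavour.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetry {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 0; ; attempt++) {
            try {
              table.put(put);
              break;
            } catch (IOException e) {
              // Expect RegionTooBusyException (possibly wrapped in a retries-exhausted
              // exception) while the memstore is over its blocking limit.
              if (attempt >= 5) {
                throw e;
              }
              Thread.sleep(100L << attempt); // simple exponential backoff
            }
          }
        }
      }
    }
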
2024-11-20T22:23:36,260 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:36,260 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/A, priority=12, startTime=1732141415699; duration=0sec 2024-11-20T22:23:36,260 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:36,260 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:A 2024-11-20T22:23:36,288 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/fd946ccc83ea493eab28278a567ce389 2024-11-20T22:23:36,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/ea618b5d5ade4c5ca372e29c91836806 is 50, key is test_row_0/B:col10/1732141415028/Put/seqid=0 2024-11-20T22:23:36,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141476340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141476342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,357 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141476342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141476344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141476337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741888_1064 (size=12151) 2024-11-20T22:23:36,372 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/ea618b5d5ade4c5ca372e29c91836806 2024-11-20T22:23:36,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/8dc68ccc761548588ccb416e288f6495 is 50, key is test_row_0/C:col10/1732141415028/Put/seqid=0 2024-11-20T22:23:36,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741889_1065 (size=12151) 2024-11-20T22:23:36,453 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/8dc68ccc761548588ccb416e288f6495 2024-11-20T22:23:36,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/fd946ccc83ea493eab28278a567ce389 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/fd946ccc83ea493eab28278a567ce389 2024-11-20T22:23:36,477 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/fd946ccc83ea493eab28278a567ce389, entries=150, sequenceid=250, filesize=11.9 K 2024-11-20T22:23:36,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/ea618b5d5ade4c5ca372e29c91836806 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/ea618b5d5ade4c5ca372e29c91836806 2024-11-20T22:23:36,487 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/ea618b5d5ade4c5ca372e29c91836806, entries=150, sequenceid=250, filesize=11.9 K 2024-11-20T22:23:36,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/8dc68ccc761548588ccb416e288f6495 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/8dc68ccc761548588ccb416e288f6495 2024-11-20T22:23:36,495 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/8dc68ccc761548588ccb416e288f6495, entries=150, sequenceid=250, filesize=11.9 K 2024-11-20T22:23:36,501 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 36a256a4871d36dc6632cf0cdb971cbb in 676ms, sequenceid=250, compaction requested=false 2024-11-20T22:23:36,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:36,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:36,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-20T22:23:36,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-20T22:23:36,508 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-20T22:23:36,508 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4650 sec 2024-11-20T22:23:36,514 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.4760 sec 2024-11-20T22:23:36,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:36,554 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T22:23:36,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:36,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:36,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:36,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:36,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:36,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:36,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/8ea0001a70d244f88a0e5b10e75cfea8 is 50, key is test_row_0/A:col10/1732141416231/Put/seqid=0 2024-11-20T22:23:36,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141476574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141476580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141476580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141476580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141476587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741890_1066 (size=14741) 2024-11-20T22:23:36,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141476688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,691 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141476689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141476692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141476693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141476694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,895 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141476892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,898 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141476896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141476896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,902 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141476901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:36,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:36,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141476901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,030 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/8ea0001a70d244f88a0e5b10e75cfea8 2024-11-20T22:23:37,046 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/2d9b553381fc4b66bf4764c761e687e8 is 50, key is test_row_0/B:col10/1732141416231/Put/seqid=0 2024-11-20T22:23:37,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741891_1067 (size=12301) 2024-11-20T22:23:37,055 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/2d9b553381fc4b66bf4764c761e687e8 2024-11-20T22:23:37,080 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/3a1d5680113444eab100d9cbb4f1be17 is 50, key is test_row_0/C:col10/1732141416231/Put/seqid=0 2024-11-20T22:23:37,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741892_1068 (size=12301) 2024-11-20T22:23:37,107 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/3a1d5680113444eab100d9cbb4f1be17 2024-11-20T22:23:37,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/8ea0001a70d244f88a0e5b10e75cfea8 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/8ea0001a70d244f88a0e5b10e75cfea8 2024-11-20T22:23:37,133 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/8ea0001a70d244f88a0e5b10e75cfea8, entries=200, sequenceid=277, filesize=14.4 K 2024-11-20T22:23:37,136 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/2d9b553381fc4b66bf4764c761e687e8 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/2d9b553381fc4b66bf4764c761e687e8 2024-11-20T22:23:37,146 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/2d9b553381fc4b66bf4764c761e687e8, entries=150, sequenceid=277, filesize=12.0 K 2024-11-20T22:23:37,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T22:23:37,147 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-20T22:23:37,149 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/3a1d5680113444eab100d9cbb4f1be17 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/3a1d5680113444eab100d9cbb4f1be17 2024-11-20T22:23:37,150 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:23:37,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-20T22:23:37,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T22:23:37,153 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, 
table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:23:37,154 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:23:37,154 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:23:37,157 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/3a1d5680113444eab100d9cbb4f1be17, entries=150, sequenceid=277, filesize=12.0 K 2024-11-20T22:23:37,159 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 36a256a4871d36dc6632cf0cdb971cbb in 604ms, sequenceid=277, compaction requested=true 2024-11-20T22:23:37,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:37,159 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:23:37,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:23:37,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:37,160 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:23:37,161 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39555 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:23:37,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:23:37,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:37,161 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/A is initiating minor compaction (all files) 2024-11-20T22:23:37,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:23:37,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:37,161 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 
36a256a4871d36dc6632cf0cdb971cbb/A in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:37,161 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/8ca75427dc30442cbb082712e9613fd6, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/fd946ccc83ea493eab28278a567ce389, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/8ea0001a70d244f88a0e5b10e75cfea8] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=38.6 K 2024-11-20T22:23:37,162 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ca75427dc30442cbb082712e9613fd6, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732141414678 2024-11-20T22:23:37,162 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd946ccc83ea493eab28278a567ce389, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732141415027 2024-11-20T22:23:37,162 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:23:37,163 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/B is initiating minor compaction (all files) 2024-11-20T22:23:37,163 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/B in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:37,163 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0ec5c58a57c34b8cbcef7594bb829c77, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/ea618b5d5ade4c5ca372e29c91836806, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/2d9b553381fc4b66bf4764c761e687e8] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=36.2 K 2024-11-20T22:23:37,163 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ea0001a70d244f88a0e5b10e75cfea8, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732141416226 2024-11-20T22:23:37,164 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ec5c58a57c34b8cbcef7594bb829c77, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732141414678 2024-11-20T22:23:37,165 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting ea618b5d5ade4c5ca372e29c91836806, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732141415027 2024-11-20T22:23:37,165 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d9b553381fc4b66bf4764c761e687e8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732141416226 2024-11-20T22:23:37,185 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#A#compaction#54 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:37,185 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#B#compaction#55 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:37,186 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/d0f236c24cb44828b85f6caef1c793d5 is 50, key is test_row_0/A:col10/1732141416231/Put/seqid=0 2024-11-20T22:23:37,186 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/8969f369d962467db601f444b8d56b68 is 50, key is test_row_0/B:col10/1732141416231/Put/seqid=0 2024-11-20T22:23:37,202 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T22:23:37,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:37,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:37,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:37,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:37,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:37,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:37,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:37,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741893_1069 (size=12915) 2024-11-20T22:23:37,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T22:23:37,266 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/d89f84870cde4c11aad0422e76bb8292 is 50, key is test_row_0/A:col10/1732141417199/Put/seqid=0 2024-11-20T22:23:37,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:37,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:37,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141477264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141477267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741894_1070 (size=12915) 2024-11-20T22:23:37,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:37,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141477267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:37,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141477268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,279 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/8969f369d962467db601f444b8d56b68 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/8969f369d962467db601f444b8d56b68 2024-11-20T22:23:37,280 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:37,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141477277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741895_1071 (size=14741) 2024-11-20T22:23:37,291 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/B of 36a256a4871d36dc6632cf0cdb971cbb into 8969f369d962467db601f444b8d56b68(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:23:37,291 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:37,291 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/B, priority=13, startTime=1732141417160; duration=0sec 2024-11-20T22:23:37,291 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:37,291 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:B 2024-11-20T22:23:37,291 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:23:37,294 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/d89f84870cde4c11aad0422e76bb8292 2024-11-20T22:23:37,296 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:23:37,296 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/C is initiating minor compaction (all files) 2024-11-20T22:23:37,297 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/C 
in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:37,297 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/30456054936c45679f31180aa87f3f99, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/8dc68ccc761548588ccb416e288f6495, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/3a1d5680113444eab100d9cbb4f1be17] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=36.2 K 2024-11-20T22:23:37,297 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 30456054936c45679f31180aa87f3f99, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732141414678 2024-11-20T22:23:37,300 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 8dc68ccc761548588ccb416e288f6495, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732141415027 2024-11-20T22:23:37,303 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a1d5680113444eab100d9cbb4f1be17, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732141416226 2024-11-20T22:23:37,307 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,309 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T22:23:37,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:37,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:37,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:37,309 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:37,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:37,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:37,320 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/4f3f90df92a4482d85738c1a4323c816 is 50, key is test_row_0/B:col10/1732141417199/Put/seqid=0 2024-11-20T22:23:37,335 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#C#compaction#58 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:37,335 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/920049fe5965486dac765ce6ba9a6a1b is 50, key is test_row_0/C:col10/1732141416231/Put/seqid=0 2024-11-20T22:23:37,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741896_1072 (size=12301) 2024-11-20T22:23:37,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:37,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141477371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:37,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141477376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,382 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:37,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141477376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,383 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:37,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141477379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,384 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:37,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141477382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741897_1073 (size=12915) 2024-11-20T22:23:37,403 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/920049fe5965486dac765ce6ba9a6a1b as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/920049fe5965486dac765ce6ba9a6a1b 2024-11-20T22:23:37,412 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/C of 36a256a4871d36dc6632cf0cdb971cbb into 920049fe5965486dac765ce6ba9a6a1b(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
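The entry just above records the minor compaction of the C store: three flushed HFiles (about 36.2 K in total) were rewritten into the single file 920049fe5965486dac765ce6ba9a6a1b (12.6 K). For reference, the same kind of compaction can be requested and observed from outside the test through the public Admin API; the following is only a minimal sketch, assuming an HBase 2.x client on the classpath, a reachable cluster picked up from hbase-site.xml, and the table name taken from this log (everything else is a default):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();          // reads hbase-site.xml from the classpath
    TableName table = TableName.valueOf("TestAcidGuarantees"); // table name as it appears in the log
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.majorCompact(table);                               // queue a compaction request for all regions
      // Poll until the region servers report no compaction in progress for the table.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(1000);
      }
    }
  }
}
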
2024-11-20T22:23:37,412 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:37,412 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/C, priority=13, startTime=1732141417161; duration=0sec 2024-11-20T22:23:37,412 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:37,412 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:C 2024-11-20T22:23:37,465 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T22:23:37,466 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T22:23:37,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:37,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:37,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:37,467 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:23:37,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:37,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:37,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:37,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141477579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:37,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141477586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:37,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141477586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:37,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:37,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141477591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141477591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,619 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,620 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T22:23:37,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:37,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:37,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:37,620 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:37,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:37,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:37,638 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/d0f236c24cb44828b85f6caef1c793d5 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d0f236c24cb44828b85f6caef1c793d5 2024-11-20T22:23:37,648 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/A of 36a256a4871d36dc6632cf0cdb971cbb into d0f236c24cb44828b85f6caef1c793d5(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:23:37,648 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:37,648 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/A, priority=13, startTime=1732141417159; duration=0sec 2024-11-20T22:23:37,648 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:37,648 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:A 2024-11-20T22:23:37,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T22:23:37,774 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,775 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T22:23:37,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:37,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
as already flushing 2024-11-20T22:23:37,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:37,775 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:37,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:23:37,783 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/4f3f90df92a4482d85738c1a4323c816 2024-11-20T22:23:37,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
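Most of the repetition above is the same pair of conditions: client mutations are rejected with RegionTooBusyException while the region's memstore is over its 512.0 K blocking limit, and the master's remote flush procedure (pid=25) keeps failing with "as already flushing" until the in-flight flush completes. The stock HBase client treats RegionTooBusyException as retryable and retries it internally (subject to hbase.client.retries.number and the configured pause), so application code rarely sees it; the sketch below only makes that handling explicit. It is a minimal illustration, assuming an HBase 2.x client, with the table, row, and column names copied from this log and the value purely hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  // Retry a put with exponential backoff when the region reports it is too busy
  // (memstore over its blocking limit, as in the warnings above).
  static void putWithBackoff(Table table, Put put, int maxAttempts) throws Exception {
    long pauseMs = 100;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) throw e;    // give up after maxAttempts tries
        Thread.sleep(pauseMs);
        pauseMs = Math.min(pauseMs * 2, 5_000); // cap the backoff at 5 seconds
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))              // row seen in the flush entries
          .addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"),  // family/qualifier as in the log
                     Bytes.toBytes("value"));                     // hypothetical payload
      putWithBackoff(table, put, 10);
    }
  }
}
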
2024-11-20T22:23:37,819 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/be333753cb3b484b8b97747bb4d9ad88 is 50, key is test_row_0/C:col10/1732141417199/Put/seqid=0 2024-11-20T22:23:37,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741898_1074 (size=12301) 2024-11-20T22:23:37,862 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/be333753cb3b484b8b97747bb4d9ad88 2024-11-20T22:23:37,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/d89f84870cde4c11aad0422e76bb8292 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d89f84870cde4c11aad0422e76bb8292 2024-11-20T22:23:37,887 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d89f84870cde4c11aad0422e76bb8292, entries=200, sequenceid=290, filesize=14.4 K 2024-11-20T22:23:37,889 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/4f3f90df92a4482d85738c1a4323c816 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/4f3f90df92a4482d85738c1a4323c816 2024-11-20T22:23:37,899 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/4f3f90df92a4482d85738c1a4323c816, entries=150, sequenceid=290, filesize=12.0 K 2024-11-20T22:23:37,901 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/be333753cb3b484b8b97747bb4d9ad88 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/be333753cb3b484b8b97747bb4d9ad88 2024-11-20T22:23:37,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:37,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141477888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:37,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141477895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:37,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141477896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:37,913 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:37,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141477897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141477897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,922 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/be333753cb3b484b8b97747bb4d9ad88, entries=150, sequenceid=290, filesize=12.0 K 2024-11-20T22:23:37,924 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 36a256a4871d36dc6632cf0cdb971cbb in 722ms, sequenceid=290, compaction requested=false 2024-11-20T22:23:37,924 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:37,936 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:37,936 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T22:23:37,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
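[editor's note] The repeated "Over memstore limit=512.0 K" RegionTooBusyException warnings above are thrown by HRegion.checkResources() once a region's memstore exceeds its blocking limit, i.e. the flush size multiplied by the block multiplier. A hypothetical configuration sketch follows; the property names are real HBase keys, but the specific values are assumptions chosen only because 128 KB × 4 = 512 KB matches the limit reported in this log (the values the test actually uses are not visible here).

```java
// Hypothetical sketch of the two settings behind the 512 K blocking limit.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // flush at ~128 KB (assumed)
    conf.setLong("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4x flush size
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getLong("hbase.hregion.memstore.block.multiplier", 0);
    // With the assumed values this prints 524288, i.e. the 512.0 K limit in the log.
    System.out.println("Writes block above ~" + blockingLimit + " bytes per region");
  }
}
```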
2024-11-20T22:23:37,937 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T22:23:37,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:37,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:37,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:37,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:37,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:37,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:37,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/4875fc8a8b1b4e48a68c3db57dacf734 is 50, key is test_row_0/A:col10/1732141417238/Put/seqid=0 2024-11-20T22:23:37,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741899_1075 (size=12301) 2024-11-20T22:23:38,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T22:23:38,374 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/4875fc8a8b1b4e48a68c3db57dacf734 2024-11-20T22:23:38,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/b04262b52aa84171b5596b5f1297e273 is 50, key is test_row_0/B:col10/1732141417238/Put/seqid=0 2024-11-20T22:23:38,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
as already flushing 2024-11-20T22:23:38,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:38,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741900_1076 (size=12301) 2024-11-20T22:23:38,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:38,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141478432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:38,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:38,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141478432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:38,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:38,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141478440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:38,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:38,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141478440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:38,448 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:38,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141478444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:38,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:38,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141478544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:38,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:38,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141478549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:38,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:38,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141478552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:38,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:38,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141478553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:38,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:38,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141478552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:38,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:38,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141478747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:38,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:38,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141478755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:38,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:38,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141478758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:38,765 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:38,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141478763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:38,794 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:38,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141478792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:38,842 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/b04262b52aa84171b5596b5f1297e273 2024-11-20T22:23:38,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/bb0e115ac3bf4e5aa60a97321ed7f191 is 50, key is test_row_0/C:col10/1732141417238/Put/seqid=0 2024-11-20T22:23:38,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741901_1077 (size=12301) 2024-11-20T22:23:38,891 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/bb0e115ac3bf4e5aa60a97321ed7f191 2024-11-20T22:23:38,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/4875fc8a8b1b4e48a68c3db57dacf734 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/4875fc8a8b1b4e48a68c3db57dacf734 2024-11-20T22:23:38,910 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/4875fc8a8b1b4e48a68c3db57dacf734, entries=150, sequenceid=316, filesize=12.0 K 2024-11-20T22:23:38,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/b04262b52aa84171b5596b5f1297e273 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/b04262b52aa84171b5596b5f1297e273 2024-11-20T22:23:38,920 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/b04262b52aa84171b5596b5f1297e273, entries=150, sequenceid=316, filesize=12.0 K 2024-11-20T22:23:38,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/bb0e115ac3bf4e5aa60a97321ed7f191 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/bb0e115ac3bf4e5aa60a97321ed7f191 2024-11-20T22:23:38,935 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/bb0e115ac3bf4e5aa60a97321ed7f191, entries=150, sequenceid=316, filesize=12.0 K 2024-11-20T22:23:38,937 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 36a256a4871d36dc6632cf0cdb971cbb in 1000ms, sequenceid=316, compaction requested=true 2024-11-20T22:23:38,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:38,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
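[editor's note] Even after this flush completes, the writer threads keep receiving RegionTooBusyException (see the warnings that follow). The stock HBase client already absorbs these by retrying internally; the sketch below only illustrates the idea with an explicit backoff loop. The table, row, and column names are taken from the log; the retry policy and class name are assumptions, not how TestAcidGuarantees itself writes.

```java
// Illustrative client-side handling of the RegionTooBusyException seen in this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException e) {
          // Memstore is over its blocking limit; wait for a flush to drain it, then retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
```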
2024-11-20T22:23:38,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-20T22:23:38,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-20T22:23:38,941 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-20T22:23:38,942 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7850 sec 2024-11-20T22:23:38,944 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.7930 sec 2024-11-20T22:23:39,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:39,056 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T22:23:39,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:39,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:39,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:39,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:39,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:39,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:39,078 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/9f5007c8471549fb998fb89db8eec13d is 50, key is test_row_0/A:col10/1732141419054/Put/seqid=0 2024-11-20T22:23:39,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141479115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141479118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,125 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141479121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,125 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141479122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141479122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741902_1078 (size=12301) 2024-11-20T22:23:39,138 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/9f5007c8471549fb998fb89db8eec13d 2024-11-20T22:23:39,161 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/d78d9b26c00e4dc793d9107ac414e3b7 is 50, key is test_row_0/B:col10/1732141419054/Put/seqid=0 2024-11-20T22:23:39,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741903_1079 (size=12301) 2024-11-20T22:23:39,218 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/d78d9b26c00e4dc793d9107ac414e3b7 2024-11-20T22:23:39,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141479226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,231 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141479227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141479230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141479235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141479235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,248 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/38bc3d99326c4adeaa2f6510f68dfab0 is 50, key is test_row_0/C:col10/1732141419054/Put/seqid=0 2024-11-20T22:23:39,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T22:23:39,274 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-20T22:23:39,279 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:23:39,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-20T22:23:39,282 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:23:39,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T22:23:39,283 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:23:39,283 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:23:39,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741904_1080 (size=12301) 2024-11-20T22:23:39,299 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/38bc3d99326c4adeaa2f6510f68dfab0 
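The RegionTooBusyException entries above come from HRegion.checkResources() pushing back on writers once the region's memstore passes its blocking limit (512.0 K in this run, a deliberately small value so the blocking path gets exercised). As a rough sketch only, assuming the stock HBase configuration keys and made-up values picked so that 128 K x 4 lines up with the 512 K limit seen here, a test would shrink that limit roughly like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreConf {
      // Hypothetical test setup (not taken from this log): shrink the memstore so the
      // blocking limit (flush size * block multiplier) is reached after a few small puts.
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush at ~128 K (assumed value)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at ~4x the flush size, ~512 K
        return conf;
      }
    }

The client side of the same rejection shows up in the ipc.CallRunner lines: each refused Mutate is reported back with its callId, connection and deadline, and the standard client will typically back off and retry once the memstore has been flushed.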
2024-11-20T22:23:39,315 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/9f5007c8471549fb998fb89db8eec13d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/9f5007c8471549fb998fb89db8eec13d 2024-11-20T22:23:39,326 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/9f5007c8471549fb998fb89db8eec13d, entries=150, sequenceid=330, filesize=12.0 K 2024-11-20T22:23:39,327 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/d78d9b26c00e4dc793d9107ac414e3b7 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/d78d9b26c00e4dc793d9107ac414e3b7 2024-11-20T22:23:39,337 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/d78d9b26c00e4dc793d9107ac414e3b7, entries=150, sequenceid=330, filesize=12.0 K 2024-11-20T22:23:39,338 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/38bc3d99326c4adeaa2f6510f68dfab0 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/38bc3d99326c4adeaa2f6510f68dfab0 2024-11-20T22:23:39,346 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/38bc3d99326c4adeaa2f6510f68dfab0, entries=150, sequenceid=330, filesize=12.0 K 2024-11-20T22:23:39,348 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 36a256a4871d36dc6632cf0cdb971cbb in 293ms, sequenceid=330, compaction requested=true 2024-11-20T22:23:39,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:39,348 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:23:39,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:23:39,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:39,350 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:23:39,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:23:39,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:39,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:23:39,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:39,351 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52258 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:23:39,351 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/A is initiating minor compaction (all files) 2024-11-20T22:23:39,351 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/A in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:39,352 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d0f236c24cb44828b85f6caef1c793d5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d89f84870cde4c11aad0422e76bb8292, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/4875fc8a8b1b4e48a68c3db57dacf734, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/9f5007c8471549fb998fb89db8eec13d] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=51.0 K 2024-11-20T22:23:39,352 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0f236c24cb44828b85f6caef1c793d5, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732141416226 2024-11-20T22:23:39,353 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:23:39,353 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/B is initiating minor compaction (all files) 2024-11-20T22:23:39,353 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
d89f84870cde4c11aad0422e76bb8292, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732141416564 2024-11-20T22:23:39,353 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/B in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:39,353 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/8969f369d962467db601f444b8d56b68, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/4f3f90df92a4482d85738c1a4323c816, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/b04262b52aa84171b5596b5f1297e273, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/d78d9b26c00e4dc793d9107ac414e3b7] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=48.7 K 2024-11-20T22:23:39,353 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4875fc8a8b1b4e48a68c3db57dacf734, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732141417238 2024-11-20T22:23:39,354 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 8969f369d962467db601f444b8d56b68, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732141416226 2024-11-20T22:23:39,354 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f5007c8471549fb998fb89db8eec13d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732141418421 2024-11-20T22:23:39,354 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f3f90df92a4482d85738c1a4323c816, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732141416564 2024-11-20T22:23:39,355 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting b04262b52aa84171b5596b5f1297e273, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732141417238 2024-11-20T22:23:39,355 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting d78d9b26c00e4dc793d9107ac414e3b7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732141418421 2024-11-20T22:23:39,369 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#A#compaction#66 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:39,370 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/da1f868db3da4eef81cae7815df54a05 is 50, key is test_row_0/A:col10/1732141419054/Put/seqid=0 2024-11-20T22:23:39,374 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#B#compaction#67 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:39,374 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/ffe38d712bfa42ffb1177794aeebe934 is 50, key is test_row_0/B:col10/1732141419054/Put/seqid=0 2024-11-20T22:23:39,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T22:23:39,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741906_1082 (size=13051) 2024-11-20T22:23:39,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741905_1081 (size=13051) 2024-11-20T22:23:39,435 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,436 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T22:23:39,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
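The FLUSH procedure chain above (pid=26 on the master, sub-procedure pid=27 dispatched to the region server) is what an admin-initiated flush of TestAcidGuarantees looks like from the server side. A minimal client-side sketch of the call that starts such a procedure, assuming the standard HBase Java Admin API and the table name from this log (configuration and error handling kept to a minimum):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Asks the master to flush every region of the table; in this setup the master
          // runs it as a FlushTableProcedure (pid=26 above) and the call returns once it completes.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }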
2024-11-20T22:23:39,437 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T22:23:39,439 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/ffe38d712bfa42ffb1177794aeebe934 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/ffe38d712bfa42ffb1177794aeebe934 2024-11-20T22:23:39,445 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/da1f868db3da4eef81cae7815df54a05 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/da1f868db3da4eef81cae7815df54a05 2024-11-20T22:23:39,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:39,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:39,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:39,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:39,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:39,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:39,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:39,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:39,460 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/B of 36a256a4871d36dc6632cf0cdb971cbb into ffe38d712bfa42ffb1177794aeebe934(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
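One point worth reading off the completed B-store compaction above: the four input flush files account for the reported 48.7 K total (roughly 12.6 K + 12.0 K + 12.0 K + 12.0 K), while the single output file is only 12.7 K. The flushes evidently contain mostly overlapping data for the same few test rows (the biggest-cell probes all point at keys such as test_row_0), so the rewrite collapses them to roughly the size of one flush.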
2024-11-20T22:23:39,460 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:39,460 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/B, priority=12, startTime=1732141419350; duration=0sec 2024-11-20T22:23:39,461 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:39,461 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:B 2024-11-20T22:23:39,461 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:23:39,465 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:23:39,466 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/C is initiating minor compaction (all files) 2024-11-20T22:23:39,466 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/C in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:39,466 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/920049fe5965486dac765ce6ba9a6a1b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/be333753cb3b484b8b97747bb4d9ad88, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/bb0e115ac3bf4e5aa60a97321ed7f191, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/38bc3d99326c4adeaa2f6510f68dfab0] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=48.7 K 2024-11-20T22:23:39,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/d05570d2f6ed4497952622cb14f60b30 is 50, key is test_row_0/A:col10/1732141419436/Put/seqid=0 2024-11-20T22:23:39,467 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 920049fe5965486dac765ce6ba9a6a1b, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732141416226 2024-11-20T22:23:39,467 DEBUG 
[RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting be333753cb3b484b8b97747bb4d9ad88, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732141416564 2024-11-20T22:23:39,469 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting bb0e115ac3bf4e5aa60a97321ed7f191, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732141417238 2024-11-20T22:23:39,470 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 38bc3d99326c4adeaa2f6510f68dfab0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732141418421 2024-11-20T22:23:39,472 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/A of 36a256a4871d36dc6632cf0cdb971cbb into da1f868db3da4eef81cae7815df54a05(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:23:39,472 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:39,472 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/A, priority=12, startTime=1732141419348; duration=0sec 2024-11-20T22:23:39,472 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:39,472 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:A 2024-11-20T22:23:39,484 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141479465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141479470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,492 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,491 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141479477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141479485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141479493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,536 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#C#compaction#69 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:39,537 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/e3a5324926754aba847a3e7caa293a39 is 50, key is test_row_0/C:col10/1732141419054/Put/seqid=0 2024-11-20T22:23:39,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741907_1083 (size=14741) 2024-11-20T22:23:39,574 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/d05570d2f6ed4497952622cb14f60b30 2024-11-20T22:23:39,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T22:23:39,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141479587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141479587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141479594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,602 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141479599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/e785dbf75cb44763934a5393d96d961c is 50, key is test_row_0/B:col10/1732141419436/Put/seqid=0 2024-11-20T22:23:39,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141479602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741908_1084 (size=13051) 2024-11-20T22:23:39,636 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/e3a5324926754aba847a3e7caa293a39 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/e3a5324926754aba847a3e7caa293a39 2024-11-20T22:23:39,648 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/C of 36a256a4871d36dc6632cf0cdb971cbb into e3a5324926754aba847a3e7caa293a39(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:23:39,648 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:39,649 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/C, priority=12, startTime=1732141419351; duration=0sec 2024-11-20T22:23:39,649 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:39,649 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:C 2024-11-20T22:23:39,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741909_1085 (size=12301) 2024-11-20T22:23:39,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141479795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141479803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141479802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,813 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141479811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:39,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141479818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:39,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T22:23:40,091 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/e785dbf75cb44763934a5393d96d961c 2024-11-20T22:23:40,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:40,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141480111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:40,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:40,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141480114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:40,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/fa890bd7c4874e04a6ada1c7d40716e7 is 50, key is test_row_0/C:col10/1732141419436/Put/seqid=0 2024-11-20T22:23:40,123 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:40,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141480116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:40,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:40,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141480123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:40,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:40,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141480128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:40,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741910_1086 (size=12301) 2024-11-20T22:23:40,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T22:23:40,586 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/fa890bd7c4874e04a6ada1c7d40716e7 2024-11-20T22:23:40,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/d05570d2f6ed4497952622cb14f60b30 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d05570d2f6ed4497952622cb14f60b30 2024-11-20T22:23:40,615 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d05570d2f6ed4497952622cb14f60b30, entries=200, sequenceid=354, filesize=14.4 K 2024-11-20T22:23:40,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/e785dbf75cb44763934a5393d96d961c as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/e785dbf75cb44763934a5393d96d961c 2024-11-20T22:23:40,629 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/e785dbf75cb44763934a5393d96d961c, entries=150, sequenceid=354, filesize=12.0 K 2024-11-20T22:23:40,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/fa890bd7c4874e04a6ada1c7d40716e7 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/fa890bd7c4874e04a6ada1c7d40716e7 2024-11-20T22:23:40,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:40,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141480632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:40,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:40,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141480632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:40,640 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:40,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141480632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:40,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:40,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141480635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:40,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:40,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141480635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:40,651 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/fa890bd7c4874e04a6ada1c7d40716e7, entries=150, sequenceid=354, filesize=12.0 K 2024-11-20T22:23:40,653 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 36a256a4871d36dc6632cf0cdb971cbb in 1217ms, sequenceid=354, compaction requested=false 2024-11-20T22:23:40,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:40,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:40,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-20T22:23:40,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-20T22:23:40,664 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-20T22:23:40,664 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3760 sec 2024-11-20T22:23:40,666 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.3860 sec 2024-11-20T22:23:41,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T22:23:41,388 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-20T22:23:41,390 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:23:41,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-20T22:23:41,393 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:23:41,394 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:23:41,394 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:23:41,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T22:23:41,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T22:23:41,554 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:41,555 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T22:23:41,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:41,556 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T22:23:41,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:41,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:41,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:41,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:41,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:41,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:41,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/873f11329b0a447791585bc49c3909db is 50, key is test_row_0/A:col10/1732141419469/Put/seqid=0 2024-11-20T22:23:41,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741911_1087 (size=12301) 2024-11-20T22:23:41,646 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:41,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:41,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T22:23:41,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:41,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141481753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:41,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:41,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141481791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:41,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:41,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141481793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:41,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:41,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141481798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:41,804 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:41,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141481798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:41,902 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:41,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141481901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:41,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:41,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141481904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:41,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:41,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141481907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:41,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:41,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:41,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141481907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:41,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141481907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:42,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T22:23:42,006 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/873f11329b0a447791585bc49c3909db 2024-11-20T22:23:42,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/791046d6084845128a6b3ee3b04815f5 is 50, key is test_row_0/B:col10/1732141419469/Put/seqid=0 2024-11-20T22:23:42,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741912_1088 (size=12301) 2024-11-20T22:23:42,058 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/791046d6084845128a6b3ee3b04815f5 2024-11-20T22:23:42,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell 
in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/c214153209714a27b07a2c0da3b9615d is 50, key is test_row_0/C:col10/1732141419469/Put/seqid=0 2024-11-20T22:23:42,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741913_1089 (size=12301) 2024-11-20T22:23:42,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:42,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141482104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:42,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:42,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:42,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141482113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:42,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:42,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141482114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:42,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141482112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:42,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:42,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141482115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:42,412 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:42,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141482411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:42,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:42,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141482419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:42,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:42,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141482421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:42,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:42,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141482423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:42,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:42,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141482429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:42,497 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/c214153209714a27b07a2c0da3b9615d 2024-11-20T22:23:42,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T22:23:42,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/873f11329b0a447791585bc49c3909db as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/873f11329b0a447791585bc49c3909db 2024-11-20T22:23:42,519 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/873f11329b0a447791585bc49c3909db, entries=150, sequenceid=370, filesize=12.0 K 2024-11-20T22:23:42,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/791046d6084845128a6b3ee3b04815f5 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/791046d6084845128a6b3ee3b04815f5 2024-11-20T22:23:42,528 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/791046d6084845128a6b3ee3b04815f5, entries=150, sequenceid=370, filesize=12.0 K 2024-11-20T22:23:42,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/c214153209714a27b07a2c0da3b9615d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/c214153209714a27b07a2c0da3b9615d 2024-11-20T22:23:42,537 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/c214153209714a27b07a2c0da3b9615d, entries=150, sequenceid=370, filesize=12.0 K 2024-11-20T22:23:42,538 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 36a256a4871d36dc6632cf0cdb971cbb in 982ms, sequenceid=370, compaction requested=true 2024-11-20T22:23:42,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:42,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:42,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-20T22:23:42,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-20T22:23:42,547 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-20T22:23:42,547 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1460 sec 2024-11-20T22:23:42,549 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.1580 sec 2024-11-20T22:23:42,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:42,917 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:23:42,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:42,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:42,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:42,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:42,917 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:42,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:42,922 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/e962bd356c124059a656160655b6bc43 is 50, key is test_row_0/A:col10/1732141421749/Put/seqid=0 2024-11-20T22:23:42,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741914_1090 (size=14741) 2024-11-20T22:23:42,930 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/e962bd356c124059a656160655b6bc43 2024-11-20T22:23:42,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:42,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:42,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141482930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:42,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141482930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:42,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:42,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141482932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:42,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:42,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141482932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:42,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:42,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141482934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:42,943 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/0ea40a877d714e8792a9d75045cb833b is 50, key is test_row_0/B:col10/1732141421749/Put/seqid=0 2024-11-20T22:23:42,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741915_1091 (size=12301) 2024-11-20T22:23:43,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:43,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141483033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:43,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:43,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141483036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:43,040 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:43,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141483037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:43,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:43,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141483039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:43,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:43,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141483239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:43,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:43,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141483239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:43,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:43,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141483255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:43,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:43,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141483255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:43,349 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/0ea40a877d714e8792a9d75045cb833b 2024-11-20T22:23:43,367 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/1e52e2fd37b74d8080e791c00b2c1d0f is 50, key is test_row_0/C:col10/1732141421749/Put/seqid=0 2024-11-20T22:23:43,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741916_1092 (size=12301) 2024-11-20T22:23:43,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T22:23:43,507 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-20T22:23:43,508 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:23:43,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-11-20T22:23:43,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T22:23:43,512 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:23:43,512 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=30, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:23:43,512 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:23:43,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:43,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141483543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:43,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:43,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141483546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:43,570 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:43,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141483565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:43,574 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:43,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141483571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:43,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T22:23:43,665 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:43,665 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T22:23:43,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:43,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:43,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:43,666 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:43,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:43,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:43,779 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/1e52e2fd37b74d8080e791c00b2c1d0f 2024-11-20T22:23:43,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/e962bd356c124059a656160655b6bc43 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/e962bd356c124059a656160655b6bc43 2024-11-20T22:23:43,791 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/e962bd356c124059a656160655b6bc43, entries=200, sequenceid=395, filesize=14.4 K 2024-11-20T22:23:43,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/0ea40a877d714e8792a9d75045cb833b as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0ea40a877d714e8792a9d75045cb833b 2024-11-20T22:23:43,799 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0ea40a877d714e8792a9d75045cb833b, entries=150, sequenceid=395, filesize=12.0 K 2024-11-20T22:23:43,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/1e52e2fd37b74d8080e791c00b2c1d0f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/1e52e2fd37b74d8080e791c00b2c1d0f 2024-11-20T22:23:43,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T22:23:43,814 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/1e52e2fd37b74d8080e791c00b2c1d0f, entries=150, sequenceid=395, filesize=12.0 K 2024-11-20T22:23:43,815 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 36a256a4871d36dc6632cf0cdb971cbb in 898ms, sequenceid=395, compaction requested=true 2024-11-20T22:23:43,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:43,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:23:43,815 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:23:43,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:43,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:23:43,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:43,816 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:23:43,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:23:43,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:43,817 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54834 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:23:43,817 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/A is initiating minor compaction (all files) 2024-11-20T22:23:43,817 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/A in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:43,817 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/da1f868db3da4eef81cae7815df54a05, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d05570d2f6ed4497952622cb14f60b30, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/873f11329b0a447791585bc49c3909db, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/e962bd356c124059a656160655b6bc43] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=53.5 K 2024-11-20T22:23:43,818 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:23:43,818 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/B is initiating minor compaction (all files) 2024-11-20T22:23:43,818 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/B in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:43,818 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/ffe38d712bfa42ffb1177794aeebe934, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/e785dbf75cb44763934a5393d96d961c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/791046d6084845128a6b3ee3b04815f5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0ea40a877d714e8792a9d75045cb833b] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=48.8 K 2024-11-20T22:23:43,818 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting da1f868db3da4eef81cae7815df54a05, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732141418421 2024-11-20T22:23:43,818 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:43,818 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting ffe38d712bfa42ffb1177794aeebe934, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732141418421 2024-11-20T22:23:43,819 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): 
Compacting d05570d2f6ed4497952622cb14f60b30, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732141419117 2024-11-20T22:23:43,820 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 873f11329b0a447791585bc49c3909db, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732141419464 2024-11-20T22:23:43,821 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T22:23:43,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:43,821 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:23:43,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:43,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:43,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:43,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:43,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:43,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:43,826 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting e785dbf75cb44763934a5393d96d961c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732141419117 2024-11-20T22:23:43,826 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting e962bd356c124059a656160655b6bc43, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732141421749 2024-11-20T22:23:43,827 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 791046d6084845128a6b3ee3b04815f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732141419464 2024-11-20T22:23:43,830 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ea40a877d714e8792a9d75045cb833b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732141421749 2024-11-20T22:23:43,832 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/00d56092768847ddad9047d31b9f6ad1 is 50, key is test_row_0/A:col10/1732141422928/Put/seqid=0 2024-11-20T22:23:43,847 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#A#compaction#79 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:43,848 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/a1ea5523aeb14ba0ae1c725eb67cf95e is 50, key is test_row_0/A:col10/1732141421749/Put/seqid=0 2024-11-20T22:23:43,857 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#B#compaction#80 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:43,862 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/708621c8158445f090c34d3de6cf6b6a is 50, key is test_row_0/B:col10/1732141421749/Put/seqid=0 2024-11-20T22:23:43,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741917_1093 (size=12301) 2024-11-20T22:23:43,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741918_1094 (size=13187) 2024-11-20T22:23:43,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741919_1095 (size=13187) 2024-11-20T22:23:43,925 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/a1ea5523aeb14ba0ae1c725eb67cf95e as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/a1ea5523aeb14ba0ae1c725eb67cf95e 2024-11-20T22:23:43,929 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/708621c8158445f090c34d3de6cf6b6a as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/708621c8158445f090c34d3de6cf6b6a 2024-11-20T22:23:43,933 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/A of 36a256a4871d36dc6632cf0cdb971cbb into 
a1ea5523aeb14ba0ae1c725eb67cf95e(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:23:43,934 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:43,934 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/A, priority=12, startTime=1732141423815; duration=0sec 2024-11-20T22:23:43,934 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:43,934 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:A 2024-11-20T22:23:43,934 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:23:43,936 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:23:43,936 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/C is initiating minor compaction (all files) 2024-11-20T22:23:43,937 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/C in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:43,937 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/B of 36a256a4871d36dc6632cf0cdb971cbb into 708621c8158445f090c34d3de6cf6b6a(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:23:43,937 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:43,937 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/B, priority=12, startTime=1732141423815; duration=0sec 2024-11-20T22:23:43,937 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/e3a5324926754aba847a3e7caa293a39, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/fa890bd7c4874e04a6ada1c7d40716e7, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/c214153209714a27b07a2c0da3b9615d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/1e52e2fd37b74d8080e791c00b2c1d0f] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=48.8 K 2024-11-20T22:23:43,937 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:43,937 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:B 2024-11-20T22:23:43,937 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3a5324926754aba847a3e7caa293a39, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732141418421 2024-11-20T22:23:43,938 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa890bd7c4874e04a6ada1c7d40716e7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732141419117 2024-11-20T22:23:43,938 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting c214153209714a27b07a2c0da3b9615d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732141419464 2024-11-20T22:23:43,939 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e52e2fd37b74d8080e791c00b2c1d0f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732141421749 2024-11-20T22:23:43,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:43,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
as already flushing 2024-11-20T22:23:43,962 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#C#compaction#81 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:43,963 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/2e164f9cda8d40728560ffbcb5f3a4a5 is 50, key is test_row_0/C:col10/1732141421749/Put/seqid=0 2024-11-20T22:23:43,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741920_1096 (size=13187) 2024-11-20T22:23:43,989 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/2e164f9cda8d40728560ffbcb5f3a4a5 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/2e164f9cda8d40728560ffbcb5f3a4a5 2024-11-20T22:23:44,004 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/C of 36a256a4871d36dc6632cf0cdb971cbb into 2e164f9cda8d40728560ffbcb5f3a4a5(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:23:44,004 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:44,004 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/C, priority=12, startTime=1732141423816; duration=0sec 2024-11-20T22:23:44,004 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:44,005 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:C 2024-11-20T22:23:44,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:44,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141484030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:44,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:44,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141484048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:44,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:44,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141484055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:44,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:44,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141484075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:44,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:44,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141484081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:44,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T22:23:44,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:44,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141484132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:44,276 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=406 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/00d56092768847ddad9047d31b9f6ad1 2024-11-20T22:23:44,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/c9a7f38ff54044d28e40313e37ba41d7 is 50, key is test_row_0/B:col10/1732141422928/Put/seqid=0 2024-11-20T22:23:44,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741921_1097 (size=12301) 2024-11-20T22:23:44,317 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=406 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/c9a7f38ff54044d28e40313e37ba41d7 2024-11-20T22:23:44,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/fa58ea8b2f37475db3e46fdf16768609 is 50, key is test_row_0/C:col10/1732141422928/Put/seqid=0 2024-11-20T22:23:44,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741922_1098 (size=12301) 2024-11-20T22:23:44,340 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=406 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/fa58ea8b2f37475db3e46fdf16768609 2024-11-20T22:23:44,346 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region 
is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:44,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141484345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:44,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/00d56092768847ddad9047d31b9f6ad1 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/00d56092768847ddad9047d31b9f6ad1 2024-11-20T22:23:44,357 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/00d56092768847ddad9047d31b9f6ad1, entries=150, sequenceid=406, filesize=12.0 K 2024-11-20T22:23:44,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/c9a7f38ff54044d28e40313e37ba41d7 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/c9a7f38ff54044d28e40313e37ba41d7 2024-11-20T22:23:44,370 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/c9a7f38ff54044d28e40313e37ba41d7, entries=150, sequenceid=406, filesize=12.0 K 2024-11-20T22:23:44,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/fa58ea8b2f37475db3e46fdf16768609 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/fa58ea8b2f37475db3e46fdf16768609 2024-11-20T22:23:44,384 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/fa58ea8b2f37475db3e46fdf16768609, entries=150, sequenceid=406, filesize=12.0 K 2024-11-20T22:23:44,388 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 36a256a4871d36dc6632cf0cdb971cbb in 567ms, sequenceid=406, compaction requested=false 2024-11-20T22:23:44,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:44,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:44,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-20T22:23:44,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-11-20T22:23:44,391 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-20T22:23:44,391 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 877 msec 2024-11-20T22:23:44,392 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 883 msec 2024-11-20T22:23:44,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T22:23:44,614 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-20T22:23:44,615 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:23:44,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees 2024-11-20T22:23:44,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T22:23:44,618 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:23:44,618 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:23:44,619 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:23:44,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:44,650 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T22:23:44,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:44,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:44,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:44,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-20T22:23:44,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:44,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:44,660 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/f35f59dece0142118cf3f0874036bf14 is 50, key is test_row_0/A:col10/1732141424649/Put/seqid=0 2024-11-20T22:23:44,671 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:44,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141484669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:44,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741923_1099 (size=14741) 2024-11-20T22:23:44,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/f35f59dece0142118cf3f0874036bf14 2024-11-20T22:23:44,683 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/e76c4a8529ff4991aba63a8019b63e6d is 50, key is test_row_0/B:col10/1732141424649/Put/seqid=0 2024-11-20T22:23:44,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34119 is added to blk_1073741924_1100 (size=12301) 2024-11-20T22:23:44,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T22:23:44,772 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:44,773 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T22:23:44,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:44,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:44,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141484773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:44,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:44,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:44,774 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:44,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:44,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:44,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T22:23:44,926 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:44,927 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T22:23:44,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:44,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:44,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:44,927 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:44,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:44,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:44,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:44,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141484976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:45,062 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:45,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1732141485057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:45,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:45,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1732141485060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:45,081 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:45,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:45,082 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T22:23:45,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48350 deadline: 1732141485082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:45,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:45,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:45,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:45,083 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
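Note: the repeated RegionTooBusyException entries above are the region server rejecting mutations while the region's memstore is over its blocking limit (512.0 K here) and a flush is in flight. In normal use the HBase client's retry layer usually absorbs this, but as an illustrative, hedged sketch of handling it explicitly: the row key, column family A, and qualifier col10 are taken from the log, while the value, retry count, and backoff are assumptions.

    // Hedged sketch, not the test code: back off and retry a put when the
    // region reports RegionTooBusyException (memstore above its blocking limit).
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumed default config
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100; // assumed starting backoff
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              break; // write accepted
            } catch (RegionTooBusyException e) {
              // Region is above its memstore blocking limit; wait for the
              // in-flight flush to drain before trying again.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }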
2024-11-20T22:23:45,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:45,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:45,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:45,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48308 deadline: 1732141485087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:45,093 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/e76c4a8529ff4991aba63a8019b63e6d 2024-11-20T22:23:45,121 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/0c252c96ae304a0f8db3fdff3afee35b is 50, key is test_row_0/C:col10/1732141424649/Put/seqid=0 2024-11-20T22:23:45,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741925_1101 (size=12301) 2024-11-20T22:23:45,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T22:23:45,235 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:45,236 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 
2024-11-20T22:23:45,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:45,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:45,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:45,236 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:45,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:23:45,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:45,294 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:45,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141485292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:45,389 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:45,389 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T22:23:45,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:45,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:45,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:45,390 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:23:45,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:45,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:45,542 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:45,542 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T22:23:45,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:45,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:45,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:45,543 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:45,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:45,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:23:45,548 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/0c252c96ae304a0f8db3fdff3afee35b 2024-11-20T22:23:45,554 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/f35f59dece0142118cf3f0874036bf14 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/f35f59dece0142118cf3f0874036bf14 2024-11-20T22:23:45,567 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/f35f59dece0142118cf3f0874036bf14, entries=200, sequenceid=435, filesize=14.4 K 2024-11-20T22:23:45,569 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/e76c4a8529ff4991aba63a8019b63e6d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/e76c4a8529ff4991aba63a8019b63e6d 2024-11-20T22:23:45,585 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/e76c4a8529ff4991aba63a8019b63e6d, entries=150, sequenceid=435, filesize=12.0 K 2024-11-20T22:23:45,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/0c252c96ae304a0f8db3fdff3afee35b as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/0c252c96ae304a0f8db3fdff3afee35b 2024-11-20T22:23:45,599 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/0c252c96ae304a0f8db3fdff3afee35b, entries=150, sequenceid=435, filesize=12.0 K 2024-11-20T22:23:45,600 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 36a256a4871d36dc6632cf0cdb971cbb in 950ms, sequenceid=435, compaction requested=true 2024-11-20T22:23:45,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:45,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:23:45,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:45,601 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:23:45,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:23:45,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:45,601 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:23:45,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:23:45,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:45,602 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40229 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:23:45,602 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:23:45,602 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/A is initiating minor compaction (all files) 2024-11-20T22:23:45,602 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/B is initiating minor compaction (all files) 2024-11-20T22:23:45,602 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/B in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:45,602 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/A in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
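The selection messages above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" and "Exploring compaction algorithm has selected 3 files ... with 1 in ratio") come from ExploringCompactionPolicy working from the per-store compaction settings. A hedged sketch of those knobs; the key names are the standard HBase ones and the values shown are the usual defaults (the blocking value matches the "16 blocking" in the log), not values read from this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);        // need at least 3 eligible files
        conf.setInt("hbase.hstore.compaction.max", 10);       // never select more than 10 files at once
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2F); // size ratio used by ExploringCompactionPolicy
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" threshold in the log
        return conf;
      }
    }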
2024-11-20T22:23:45,602 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/a1ea5523aeb14ba0ae1c725eb67cf95e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/00d56092768847ddad9047d31b9f6ad1, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/f35f59dece0142118cf3f0874036bf14] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=39.3 K 2024-11-20T22:23:45,602 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/708621c8158445f090c34d3de6cf6b6a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/c9a7f38ff54044d28e40313e37ba41d7, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/e76c4a8529ff4991aba63a8019b63e6d] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=36.9 K 2024-11-20T22:23:45,602 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1ea5523aeb14ba0ae1c725eb67cf95e, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732141421749 2024-11-20T22:23:45,602 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 708621c8158445f090c34d3de6cf6b6a, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732141421749 2024-11-20T22:23:45,603 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 00d56092768847ddad9047d31b9f6ad1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1732141422928 2024-11-20T22:23:45,603 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting c9a7f38ff54044d28e40313e37ba41d7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1732141422928 2024-11-20T22:23:45,603 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting e76c4a8529ff4991aba63a8019b63e6d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732141424020 2024-11-20T22:23:45,606 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting f35f59dece0142118cf3f0874036bf14, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732141424020 2024-11-20T22:23:45,625 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#B#compaction#87 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:45,626 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/1b9b6ebe354e4031bc0d2a18ed8784ca is 50, key is test_row_0/B:col10/1732141424649/Put/seqid=0 2024-11-20T22:23:45,629 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#A#compaction#88 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:45,629 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/46cb1ce697384a3a97b6b67849380ad9 is 50, key is test_row_0/A:col10/1732141424649/Put/seqid=0 2024-11-20T22:23:45,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741926_1102 (size=13289) 2024-11-20T22:23:45,643 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/1b9b6ebe354e4031bc0d2a18ed8784ca as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/1b9b6ebe354e4031bc0d2a18ed8784ca 2024-11-20T22:23:45,651 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/B of 36a256a4871d36dc6632cf0cdb971cbb into 1b9b6ebe354e4031bc0d2a18ed8784ca(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:23:45,651 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:45,651 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/B, priority=13, startTime=1732141425601; duration=0sec 2024-11-20T22:23:45,652 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:45,652 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:B 2024-11-20T22:23:45,652 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:23:45,655 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:23:45,656 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 36a256a4871d36dc6632cf0cdb971cbb/C is initiating minor compaction (all files) 2024-11-20T22:23:45,656 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 36a256a4871d36dc6632cf0cdb971cbb/C in TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:45,656 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/2e164f9cda8d40728560ffbcb5f3a4a5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/fa58ea8b2f37475db3e46fdf16768609, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/0c252c96ae304a0f8db3fdff3afee35b] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp, totalSize=36.9 K 2024-11-20T22:23:45,657 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e164f9cda8d40728560ffbcb5f3a4a5, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732141421749 2024-11-20T22:23:45,657 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting fa58ea8b2f37475db3e46fdf16768609, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1732141422928 2024-11-20T22:23:45,658 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c252c96ae304a0f8db3fdff3afee35b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732141424020 2024-11-20T22:23:45,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 
is added to blk_1073741927_1103 (size=13289) 2024-11-20T22:23:45,670 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a256a4871d36dc6632cf0cdb971cbb#C#compaction#89 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:45,671 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/139387d95a5d4417aab00a32487c8e6c is 50, key is test_row_0/C:col10/1732141424649/Put/seqid=0 2024-11-20T22:23:45,673 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/46cb1ce697384a3a97b6b67849380ad9 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/46cb1ce697384a3a97b6b67849380ad9 2024-11-20T22:23:45,680 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/A of 36a256a4871d36dc6632cf0cdb971cbb into 46cb1ce697384a3a97b6b67849380ad9(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:23:45,680 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:45,680 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/A, priority=13, startTime=1732141425600; duration=0sec 2024-11-20T22:23:45,681 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:45,681 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:A 2024-11-20T22:23:45,695 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:45,696 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T22:23:45,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741928_1104 (size=13289) 2024-11-20T22:23:45,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
2024-11-20T22:23:45,697 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T22:23:45,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:45,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:45,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:45,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:45,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:45,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:45,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/c7e36281ffd140cf9175bf39c815c5f2 is 50, key is test_row_0/A:col10/1732141424659/Put/seqid=0 2024-11-20T22:23:45,707 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/139387d95a5d4417aab00a32487c8e6c as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/139387d95a5d4417aab00a32487c8e6c 2024-11-20T22:23:45,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T22:23:45,728 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 36a256a4871d36dc6632cf0cdb971cbb/C of 36a256a4871d36dc6632cf0cdb971cbb into 139387d95a5d4417aab00a32487c8e6c(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
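The compactions completing above were queued automatically by MemStoreFlusher.0; the same short/long CompactSplit pools also serve explicit requests. A sketch of asking for a compaction from the client, reusing the table and family names from the log; the request is asynchronous and the servers decide how it is actually scheduled:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompactionSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          admin.compact(table, Bytes.toBytes("A"));  // ask the servers to compact store A
          admin.majorCompact(table);                 // or rewrite every store file in the table
        }
      }
    }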
2024-11-20T22:23:45,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741929_1105 (size=12301) 2024-11-20T22:23:45,728 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:45,728 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb., storeName=36a256a4871d36dc6632cf0cdb971cbb/C, priority=13, startTime=1732141425601; duration=0sec 2024-11-20T22:23:45,728 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:45,728 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:C 2024-11-20T22:23:45,732 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/c7e36281ffd140cf9175bf39c815c5f2 2024-11-20T22:23:45,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/f0398eafb0134d75986dc2295432cd23 is 50, key is test_row_0/B:col10/1732141424659/Put/seqid=0 2024-11-20T22:23:45,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741930_1106 (size=12301) 2024-11-20T22:23:45,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. as already flushing 2024-11-20T22:23:45,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:45,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141485855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:45,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:45,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141485959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:46,155 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/f0398eafb0134d75986dc2295432cd23 2024-11-20T22:23:46,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:46,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141486166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:46,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/230f2cc82fdd4c9a812bd1a6a8b055d1 is 50, key is test_row_0/C:col10/1732141424659/Put/seqid=0 2024-11-20T22:23:46,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741931_1107 (size=12301) 2024-11-20T22:23:46,403 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x475ca0f4 to 127.0.0.1:51822 2024-11-20T22:23:46,403 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:23:46,405 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f0c7188 to 127.0.0.1:51822 2024-11-20T22:23:46,405 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:23:46,408 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x50c9c1d1 to 127.0.0.1:51822 2024-11-20T22:23:46,408 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:23:46,409 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4f1331a9 to 127.0.0.1:51822 2024-11-20T22:23:46,409 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:23:46,473 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:46,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141486472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:46,635 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/230f2cc82fdd4c9a812bd1a6a8b055d1 2024-11-20T22:23:46,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/c7e36281ffd140cf9175bf39c815c5f2 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/c7e36281ffd140cf9175bf39c815c5f2 2024-11-20T22:23:46,649 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/c7e36281ffd140cf9175bf39c815c5f2, entries=150, sequenceid=447, filesize=12.0 K 2024-11-20T22:23:46,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/f0398eafb0134d75986dc2295432cd23 as 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/f0398eafb0134d75986dc2295432cd23 2024-11-20T22:23:46,656 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/f0398eafb0134d75986dc2295432cd23, entries=150, sequenceid=447, filesize=12.0 K 2024-11-20T22:23:46,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/230f2cc82fdd4c9a812bd1a6a8b055d1 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/230f2cc82fdd4c9a812bd1a6a8b055d1 2024-11-20T22:23:46,663 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/230f2cc82fdd4c9a812bd1a6a8b055d1, entries=150, sequenceid=447, filesize=12.0 K 2024-11-20T22:23:46,666 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 36a256a4871d36dc6632cf0cdb971cbb in 969ms, sequenceid=447, compaction requested=false 2024-11-20T22:23:46,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:46,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
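The RegionTooBusyException warnings above are thrown while pid=33 is still flushing: once the region's memstore passes its blocking limit (512 K here), checkResources() rejects further puts until the flush drains it. A client-side sketch of backing off and retrying such a write; whether the exception reaches application code directly or wrapped depends on the client's retry settings, so treat the catch clause and the backoff policy as assumptions:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      public static void main(String[] args) throws Exception {
        Put put = new Put(Bytes.toBytes("test_row_0"));
        put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              break;                           // write accepted
            } catch (RegionTooBusyException e) {
              Thread.sleep(100L << attempt);   // back off while the flush drains the memstore
            }
          }
        }
      }
    }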
2024-11-20T22:23:46,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=33 2024-11-20T22:23:46,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=33 2024-11-20T22:23:46,670 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-20T22:23:46,671 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0490 sec 2024-11-20T22:23:46,675 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees in 2.0570 sec 2024-11-20T22:23:46,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T22:23:46,722 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-11-20T22:23:46,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:46,978 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T22:23:46,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:46,978 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x39b10898 to 127.0.0.1:51822 2024-11-20T22:23:46,978 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:23:46,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:46,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:46,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:46,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:46,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:46,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/5bdf3ecb849641869cdd9f9f391b90d9 is 50, key is test_row_0/A:col10/1732141426976/Put/seqid=0 2024-11-20T22:23:46,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741932_1108 (size=12301) 2024-11-20T22:23:47,080 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x30d4d4c6 to 127.0.0.1:51822 2024-11-20T22:23:47,080 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:23:47,082 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close 
zookeeper connection 0x054c943d to 127.0.0.1:51822 2024-11-20T22:23:47,082 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:23:47,086 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2cac4303 to 127.0.0.1:51822 2024-11-20T22:23:47,086 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:23:47,096 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1d7115de to 127.0.0.1:51822 2024-11-20T22:23:47,096 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:23:47,096 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-20T22:23:47,096 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 68 2024-11-20T22:23:47,096 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 118 2024-11-20T22:23:47,096 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60 2024-11-20T22:23:47,096 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 69 2024-11-20T22:23:47,096 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 67 2024-11-20T22:23:47,096 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T22:23:47,096 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3485 2024-11-20T22:23:47,097 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3425 2024-11-20T22:23:47,097 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T22:23:47,097 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1561 2024-11-20T22:23:47,097 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4683 rows 2024-11-20T22:23:47,097 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1548 2024-11-20T22:23:47,097 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4644 rows 2024-11-20T22:23:47,097 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T22:23:47,097 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e59596a to 127.0.0.1:51822 2024-11-20T22:23:47,097 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:23:47,103 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T22:23:47,108 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T22:23:47,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T22:23:47,118 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141427117"}]},"ts":"1732141427117"} 2024-11-20T22:23:47,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T22:23:47,119 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T22:23:47,132 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to 
state=DISABLING 2024-11-20T22:23:47,134 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T22:23:47,139 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=36a256a4871d36dc6632cf0cdb971cbb, UNASSIGN}] 2024-11-20T22:23:47,140 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=36a256a4871d36dc6632cf0cdb971cbb, UNASSIGN 2024-11-20T22:23:47,141 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=36a256a4871d36dc6632cf0cdb971cbb, regionState=CLOSING, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:47,142 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T22:23:47,142 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; CloseRegionProcedure 36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950}] 2024-11-20T22:23:47,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T22:23:47,298 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:47,299 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(124): Close 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:47,300 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T22:23:47,300 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1681): Closing 36a256a4871d36dc6632cf0cdb971cbb, disabling compactions & flushes 2024-11-20T22:23:47,300 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
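pid=34 through pid=37 above are the server side of a table disable: DisableTableProcedure fans out into CloseTableRegionsProcedure, a TransitRegionStateProcedure that UNASSIGNs the region, and a CloseRegionProcedure on the hosting server. From the client this is the usual end-of-test teardown; a hedged sketch follows, where the deleteTable step is typical cleanup and is not part of this log excerpt:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          admin.disableTable(table);           // drives pid=34 and its subprocedures
          if (admin.isTableDisabled(table)) {
            admin.deleteTable(table);          // assumed cleanup step, not shown in this excerpt
          }
        }
      }
    }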
2024-11-20T22:23:47,392 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=475 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/5bdf3ecb849641869cdd9f9f391b90d9 2024-11-20T22:23:47,404 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/6dd69b1a9ef6400cbb42888fcc32478c is 50, key is test_row_0/B:col10/1732141426976/Put/seqid=0 2024-11-20T22:23:47,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741933_1109 (size=12301) 2024-11-20T22:23:47,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T22:23:47,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T22:23:47,810 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=475 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/6dd69b1a9ef6400cbb42888fcc32478c 2024-11-20T22:23:47,836 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/b4e014f1ea8c45e6a1afd3b78e42c2be is 50, key is test_row_0/C:col10/1732141426976/Put/seqid=0 2024-11-20T22:23:47,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741934_1110 (size=12301) 2024-11-20T22:23:48,086 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-20T22:23:48,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T22:23:48,249 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=475 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/b4e014f1ea8c45e6a1afd3b78e42c2be 2024-11-20T22:23:48,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/5bdf3ecb849641869cdd9f9f391b90d9 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/5bdf3ecb849641869cdd9f9f391b90d9 2024-11-20T22:23:48,280 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/5bdf3ecb849641869cdd9f9f391b90d9, entries=150, sequenceid=475, filesize=12.0 K 2024-11-20T22:23:48,281 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/6dd69b1a9ef6400cbb42888fcc32478c as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/6dd69b1a9ef6400cbb42888fcc32478c 2024-11-20T22:23:48,289 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/6dd69b1a9ef6400cbb42888fcc32478c, entries=150, sequenceid=475, filesize=12.0 K 2024-11-20T22:23:48,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/b4e014f1ea8c45e6a1afd3b78e42c2be as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/b4e014f1ea8c45e6a1afd3b78e42c2be 2024-11-20T22:23:48,314 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/b4e014f1ea8c45e6a1afd3b78e42c2be, entries=150, sequenceid=475, filesize=12.0 K 2024-11-20T22:23:48,316 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=26.84 KB/27480 for 36a256a4871d36dc6632cf0cdb971cbb in 1338ms, sequenceid=475, compaction requested=true 2024-11-20T22:23:48,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:48,316 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1703): Closing region 
TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:48,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:23:48,316 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:48,316 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. because compaction request was cancelled 2024-11-20T22:23:48,316 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. after waiting 0 ms 2024-11-20T22:23:48,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:48,316 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:A 2024-11-20T22:23:48,316 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:48,316 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. because compaction request was cancelled 2024-11-20T22:23:48,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:B, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:23:48,316 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:B 2024-11-20T22:23:48,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:48,316 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(2837): Flushing 36a256a4871d36dc6632cf0cdb971cbb 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T22:23:48,316 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 
because compaction request was cancelled 2024-11-20T22:23:48,317 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a256a4871d36dc6632cf0cdb971cbb:C 2024-11-20T22:23:48,317 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=A 2024-11-20T22:23:48,317 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:48,317 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=B 2024-11-20T22:23:48,317 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:48,317 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 36a256a4871d36dc6632cf0cdb971cbb, store=C 2024-11-20T22:23:48,317 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:48,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 36a256a4871d36dc6632cf0cdb971cbb:C, priority=-2147483648, current under compaction store size is 0 2024-11-20T22:23:48,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:48,332 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/f15104687abf40e0b1721f48dcdc1a46 is 50, key is test_row_0/A:col10/1732141427085/Put/seqid=0 2024-11-20T22:23:48,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741935_1111 (size=12301) 2024-11-20T22:23:48,798 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=482 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/f15104687abf40e0b1721f48dcdc1a46 2024-11-20T22:23:48,828 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/cbcdb1cbd3db43d1b36c570a79685230 is 50, key is test_row_0/B:col10/1732141427085/Put/seqid=0 2024-11-20T22:23:48,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741936_1112 (size=12301) 
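Both the 512 K blocking limit reported earlier and the small flush sizes in this run come from the region's memstore sizing. The key names below are standard HBase settings; the values are only one combination that would produce a 512 K blocking limit (flush size times block multiplier) and are an assumption, not something read from the test's configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreSizingSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);  // flush each region at 128 K (assumed)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // block writes at 4 x flush size = 512 K
        return conf;
      }
    }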
2024-11-20T22:23:48,851 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=482 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/cbcdb1cbd3db43d1b36c570a79685230 2024-11-20T22:23:48,872 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/b56cf9cf171b4fcd85a97964eed0aaca is 50, key is test_row_0/C:col10/1732141427085/Put/seqid=0 2024-11-20T22:23:48,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741937_1113 (size=12301) 2024-11-20T22:23:48,908 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=482 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/b56cf9cf171b4fcd85a97964eed0aaca 2024-11-20T22:23:48,938 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/A/f15104687abf40e0b1721f48dcdc1a46 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/f15104687abf40e0b1721f48dcdc1a46 2024-11-20T22:23:48,948 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/f15104687abf40e0b1721f48dcdc1a46, entries=150, sequenceid=482, filesize=12.0 K 2024-11-20T22:23:48,950 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/B/cbcdb1cbd3db43d1b36c570a79685230 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/cbcdb1cbd3db43d1b36c570a79685230 2024-11-20T22:23:48,960 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/cbcdb1cbd3db43d1b36c570a79685230, entries=150, sequenceid=482, filesize=12.0 K 2024-11-20T22:23:48,961 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/.tmp/C/b56cf9cf171b4fcd85a97964eed0aaca as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/b56cf9cf171b4fcd85a97964eed0aaca 2024-11-20T22:23:48,970 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/b56cf9cf171b4fcd85a97964eed0aaca, entries=150, sequenceid=482, filesize=12.0 K 2024-11-20T22:23:48,972 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 36a256a4871d36dc6632cf0cdb971cbb in 655ms, sequenceid=482, compaction requested=true 2024-11-20T22:23:48,975 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/80c90a30c2fb49c6b7f4eb3b7c2455e4, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/1a7462b665f741b5982270fe0212ebc6, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d5d04779900b4329886724b28a0b77b6, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/8ff1cec0656141a2a0b2e1408096ec43, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/c814267dee164e148335994d2b891a2f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/515239f0d0474cf5b6abc0eddd4474f2, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/30cd97e34684495c8144f68d5d79129c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/4d4289662fed4e0699621cd323b8c89c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/8acc2a831433477bac8151b848bd4a75, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d4023fe99c684cfbab1f61bc59a967d9, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/093475e0021d4db29c9cbc4e32817ca0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/b2a8eb21fc3149cd83eddbc48d1f9d56, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d5136c9a71274c80986463c6622437ad, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/e431f5fe61ce42c38a1e211c5421b14b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/85d4603566544a9891ac7e6893da89fe, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/8ca75427dc30442cbb082712e9613fd6, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/fd946ccc83ea493eab28278a567ce389, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/8ea0001a70d244f88a0e5b10e75cfea8, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d0f236c24cb44828b85f6caef1c793d5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d89f84870cde4c11aad0422e76bb8292, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/4875fc8a8b1b4e48a68c3db57dacf734, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/da1f868db3da4eef81cae7815df54a05, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/9f5007c8471549fb998fb89db8eec13d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d05570d2f6ed4497952622cb14f60b30, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/873f11329b0a447791585bc49c3909db, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/e962bd356c124059a656160655b6bc43, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/a1ea5523aeb14ba0ae1c725eb67cf95e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/00d56092768847ddad9047d31b9f6ad1, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/f35f59dece0142118cf3f0874036bf14] to archive 2024-11-20T22:23:48,980 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T22:23:49,005 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/80c90a30c2fb49c6b7f4eb3b7c2455e4 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/80c90a30c2fb49c6b7f4eb3b7c2455e4 2024-11-20T22:23:49,008 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/1a7462b665f741b5982270fe0212ebc6 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/1a7462b665f741b5982270fe0212ebc6 2024-11-20T22:23:49,010 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d5d04779900b4329886724b28a0b77b6 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d5d04779900b4329886724b28a0b77b6 2024-11-20T22:23:49,014 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/8ff1cec0656141a2a0b2e1408096ec43 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/8ff1cec0656141a2a0b2e1408096ec43 2024-11-20T22:23:49,021 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/c814267dee164e148335994d2b891a2f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/c814267dee164e148335994d2b891a2f 2024-11-20T22:23:49,025 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/515239f0d0474cf5b6abc0eddd4474f2 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/515239f0d0474cf5b6abc0eddd4474f2 2024-11-20T22:23:49,028 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/30cd97e34684495c8144f68d5d79129c to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/30cd97e34684495c8144f68d5d79129c 2024-11-20T22:23:49,032 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/4d4289662fed4e0699621cd323b8c89c to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/4d4289662fed4e0699621cd323b8c89c 2024-11-20T22:23:49,034 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/8acc2a831433477bac8151b848bd4a75 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/8acc2a831433477bac8151b848bd4a75 2024-11-20T22:23:49,036 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d4023fe99c684cfbab1f61bc59a967d9 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d4023fe99c684cfbab1f61bc59a967d9 2024-11-20T22:23:49,039 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/093475e0021d4db29c9cbc4e32817ca0 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/093475e0021d4db29c9cbc4e32817ca0 2024-11-20T22:23:49,042 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/b2a8eb21fc3149cd83eddbc48d1f9d56 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/b2a8eb21fc3149cd83eddbc48d1f9d56 2024-11-20T22:23:49,046 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d5136c9a71274c80986463c6622437ad to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d5136c9a71274c80986463c6622437ad 2024-11-20T22:23:49,049 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/e431f5fe61ce42c38a1e211c5421b14b to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/e431f5fe61ce42c38a1e211c5421b14b 2024-11-20T22:23:49,053 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/85d4603566544a9891ac7e6893da89fe to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/85d4603566544a9891ac7e6893da89fe 2024-11-20T22:23:49,060 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/8ca75427dc30442cbb082712e9613fd6 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/8ca75427dc30442cbb082712e9613fd6 2024-11-20T22:23:49,064 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/fd946ccc83ea493eab28278a567ce389 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/fd946ccc83ea493eab28278a567ce389 2024-11-20T22:23:49,069 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/8ea0001a70d244f88a0e5b10e75cfea8 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/8ea0001a70d244f88a0e5b10e75cfea8 2024-11-20T22:23:49,082 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d0f236c24cb44828b85f6caef1c793d5 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d0f236c24cb44828b85f6caef1c793d5 2024-11-20T22:23:49,108 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d89f84870cde4c11aad0422e76bb8292 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d89f84870cde4c11aad0422e76bb8292 2024-11-20T22:23:49,129 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/4875fc8a8b1b4e48a68c3db57dacf734 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/4875fc8a8b1b4e48a68c3db57dacf734 2024-11-20T22:23:49,147 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/da1f868db3da4eef81cae7815df54a05 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/da1f868db3da4eef81cae7815df54a05 2024-11-20T22:23:49,159 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/9f5007c8471549fb998fb89db8eec13d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/9f5007c8471549fb998fb89db8eec13d 2024-11-20T22:23:49,164 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d05570d2f6ed4497952622cb14f60b30 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/d05570d2f6ed4497952622cb14f60b30 2024-11-20T22:23:49,169 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/873f11329b0a447791585bc49c3909db to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/873f11329b0a447791585bc49c3909db 2024-11-20T22:23:49,179 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/e962bd356c124059a656160655b6bc43 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/e962bd356c124059a656160655b6bc43 2024-11-20T22:23:49,191 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/a1ea5523aeb14ba0ae1c725eb67cf95e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/a1ea5523aeb14ba0ae1c725eb67cf95e 2024-11-20T22:23:49,207 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/00d56092768847ddad9047d31b9f6ad1 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/00d56092768847ddad9047d31b9f6ad1 2024-11-20T22:23:49,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T22:23:49,228 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/f35f59dece0142118cf3f0874036bf14 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/f35f59dece0142118cf3f0874036bf14 2024-11-20T22:23:49,264 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/2415d35151b543e8bc9dbb15405334cd, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0b221af380ae426ea963d7551ea2de0a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/97f37a19e7834d40bd73ef845c3c23de, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/c01a093f53524a9881358c83a1181ab3, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/50ae2840bdf7465aa01cb0edd1ad3111, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/1b1ebd3069f5481496f4ae94e27493d8, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/89abe8a0bc54401faf0126f2f5ec8c43, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/f95bf22089e649d7a5b59e04ee228bc4, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/3c3fb9b57c19490a86431879ca842d15, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/6c4d5503b0924178936dcbebe05aa468, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/5b3367f850704c33aee6f24b5124ef5d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0df53cfc286f44ddaca12d8f9831831b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/e9bc96329ae646078f0bb84a5e921625, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/2b67b77f3f464f0493e6852a690a76cb, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0ec5c58a57c34b8cbcef7594bb829c77, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/c703176e9ad34859b83180aad4bf4116, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/ea618b5d5ade4c5ca372e29c91836806, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/8969f369d962467db601f444b8d56b68, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/2d9b553381fc4b66bf4764c761e687e8, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/4f3f90df92a4482d85738c1a4323c816, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/b04262b52aa84171b5596b5f1297e273, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/ffe38d712bfa42ffb1177794aeebe934, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/d78d9b26c00e4dc793d9107ac414e3b7, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/e785dbf75cb44763934a5393d96d961c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/791046d6084845128a6b3ee3b04815f5, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/708621c8158445f090c34d3de6cf6b6a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0ea40a877d714e8792a9d75045cb833b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/c9a7f38ff54044d28e40313e37ba41d7, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/e76c4a8529ff4991aba63a8019b63e6d] to archive 2024-11-20T22:23:49,271 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T22:23:49,280 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/2415d35151b543e8bc9dbb15405334cd to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/2415d35151b543e8bc9dbb15405334cd 2024-11-20T22:23:49,283 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0b221af380ae426ea963d7551ea2de0a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0b221af380ae426ea963d7551ea2de0a 2024-11-20T22:23:49,290 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/97f37a19e7834d40bd73ef845c3c23de to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/97f37a19e7834d40bd73ef845c3c23de 2024-11-20T22:23:49,294 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/c01a093f53524a9881358c83a1181ab3 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/c01a093f53524a9881358c83a1181ab3 2024-11-20T22:23:49,296 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/50ae2840bdf7465aa01cb0edd1ad3111 to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/50ae2840bdf7465aa01cb0edd1ad3111 2024-11-20T22:23:49,299 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/1b1ebd3069f5481496f4ae94e27493d8 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/1b1ebd3069f5481496f4ae94e27493d8 2024-11-20T22:23:49,302 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/89abe8a0bc54401faf0126f2f5ec8c43 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/89abe8a0bc54401faf0126f2f5ec8c43 2024-11-20T22:23:49,305 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/f95bf22089e649d7a5b59e04ee228bc4 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/f95bf22089e649d7a5b59e04ee228bc4 2024-11-20T22:23:49,309 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/3c3fb9b57c19490a86431879ca842d15 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/3c3fb9b57c19490a86431879ca842d15 2024-11-20T22:23:49,311 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/6c4d5503b0924178936dcbebe05aa468 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/6c4d5503b0924178936dcbebe05aa468 2024-11-20T22:23:49,316 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/5b3367f850704c33aee6f24b5124ef5d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/5b3367f850704c33aee6f24b5124ef5d 2024-11-20T22:23:49,318 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0df53cfc286f44ddaca12d8f9831831b to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0df53cfc286f44ddaca12d8f9831831b 2024-11-20T22:23:49,320 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/e9bc96329ae646078f0bb84a5e921625 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/e9bc96329ae646078f0bb84a5e921625 2024-11-20T22:23:49,322 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/2b67b77f3f464f0493e6852a690a76cb to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/2b67b77f3f464f0493e6852a690a76cb 2024-11-20T22:23:49,324 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0ec5c58a57c34b8cbcef7594bb829c77 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0ec5c58a57c34b8cbcef7594bb829c77 2024-11-20T22:23:49,327 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/c703176e9ad34859b83180aad4bf4116 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/c703176e9ad34859b83180aad4bf4116 2024-11-20T22:23:49,329 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/ea618b5d5ade4c5ca372e29c91836806 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/ea618b5d5ade4c5ca372e29c91836806 2024-11-20T22:23:49,335 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/8969f369d962467db601f444b8d56b68 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/8969f369d962467db601f444b8d56b68 2024-11-20T22:23:49,340 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/2d9b553381fc4b66bf4764c761e687e8 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/2d9b553381fc4b66bf4764c761e687e8 2024-11-20T22:23:49,344 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/4f3f90df92a4482d85738c1a4323c816 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/4f3f90df92a4482d85738c1a4323c816 2024-11-20T22:23:49,354 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/b04262b52aa84171b5596b5f1297e273 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/b04262b52aa84171b5596b5f1297e273 2024-11-20T22:23:49,357 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/ffe38d712bfa42ffb1177794aeebe934 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/ffe38d712bfa42ffb1177794aeebe934 2024-11-20T22:23:49,359 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/d78d9b26c00e4dc793d9107ac414e3b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/d78d9b26c00e4dc793d9107ac414e3b7 2024-11-20T22:23:49,360 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/e785dbf75cb44763934a5393d96d961c to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/e785dbf75cb44763934a5393d96d961c 2024-11-20T22:23:49,362 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/791046d6084845128a6b3ee3b04815f5 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/791046d6084845128a6b3ee3b04815f5 2024-11-20T22:23:49,364 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/708621c8158445f090c34d3de6cf6b6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/708621c8158445f090c34d3de6cf6b6a 2024-11-20T22:23:49,367 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0ea40a877d714e8792a9d75045cb833b to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/0ea40a877d714e8792a9d75045cb833b 2024-11-20T22:23:49,369 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/c9a7f38ff54044d28e40313e37ba41d7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/c9a7f38ff54044d28e40313e37ba41d7 2024-11-20T22:23:49,374 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/e76c4a8529ff4991aba63a8019b63e6d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/e76c4a8529ff4991aba63a8019b63e6d 2024-11-20T22:23:49,376 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/870458fbd0864dca996687d25b9667cf, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/76551996944a49ba83a86b26521d0c4c, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/a30fcf4df94a43ec876973d5b97c82ff, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/c7a73ecdd9dc4ff8aaba2aa5b767facb, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/dad1988fb8204c228b059ce3865d9028, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/844515dd2e2746ffb8d623dfa88cca3b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/7ed7bb30ced048d6867478e260e0d35c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/9b50f8cfa24b44fdbd37f2ce44d19761, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/f9805001a90b427b808d2e1aca9ece95, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/c4aaaa1b7a934cc889ddaf447e500fdf, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/1f2e4fa308f141dfbd49555b6b6542be, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/7c9e17a1ae32454da5818bdad576a6ee, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/8b1c7c7e6c104f5a96dbedb318145631, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/f08061d5faf44c8da0ce2478a38da3e4, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/30456054936c45679f31180aa87f3f99, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/46a2839fcd0241bfb11b23b3d0269d86, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/8dc68ccc761548588ccb416e288f6495, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/920049fe5965486dac765ce6ba9a6a1b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/3a1d5680113444eab100d9cbb4f1be17, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/be333753cb3b484b8b97747bb4d9ad88, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/bb0e115ac3bf4e5aa60a97321ed7f191, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/e3a5324926754aba847a3e7caa293a39, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/38bc3d99326c4adeaa2f6510f68dfab0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/fa890bd7c4874e04a6ada1c7d40716e7, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/c214153209714a27b07a2c0da3b9615d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/2e164f9cda8d40728560ffbcb5f3a4a5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/1e52e2fd37b74d8080e791c00b2c1d0f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/fa58ea8b2f37475db3e46fdf16768609, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/0c252c96ae304a0f8db3fdff3afee35b] to archive 2024-11-20T22:23:49,378 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T22:23:49,380 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/870458fbd0864dca996687d25b9667cf to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/870458fbd0864dca996687d25b9667cf 2024-11-20T22:23:49,383 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/76551996944a49ba83a86b26521d0c4c to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/76551996944a49ba83a86b26521d0c4c 2024-11-20T22:23:49,385 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/a30fcf4df94a43ec876973d5b97c82ff to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/a30fcf4df94a43ec876973d5b97c82ff 2024-11-20T22:23:49,389 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/c7a73ecdd9dc4ff8aaba2aa5b767facb to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/c7a73ecdd9dc4ff8aaba2aa5b767facb 2024-11-20T22:23:49,391 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/dad1988fb8204c228b059ce3865d9028 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/dad1988fb8204c228b059ce3865d9028 2024-11-20T22:23:49,394 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/844515dd2e2746ffb8d623dfa88cca3b to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/844515dd2e2746ffb8d623dfa88cca3b 2024-11-20T22:23:49,396 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/7ed7bb30ced048d6867478e260e0d35c to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/7ed7bb30ced048d6867478e260e0d35c 2024-11-20T22:23:49,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/9b50f8cfa24b44fdbd37f2ce44d19761 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/9b50f8cfa24b44fdbd37f2ce44d19761 2024-11-20T22:23:49,401 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/f9805001a90b427b808d2e1aca9ece95 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/f9805001a90b427b808d2e1aca9ece95 2024-11-20T22:23:49,404 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/c4aaaa1b7a934cc889ddaf447e500fdf to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/c4aaaa1b7a934cc889ddaf447e500fdf 2024-11-20T22:23:49,406 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/1f2e4fa308f141dfbd49555b6b6542be to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/1f2e4fa308f141dfbd49555b6b6542be 2024-11-20T22:23:49,412 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/7c9e17a1ae32454da5818bdad576a6ee to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/7c9e17a1ae32454da5818bdad576a6ee 2024-11-20T22:23:49,414 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/8b1c7c7e6c104f5a96dbedb318145631 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/8b1c7c7e6c104f5a96dbedb318145631 2024-11-20T22:23:49,416 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/f08061d5faf44c8da0ce2478a38da3e4 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/f08061d5faf44c8da0ce2478a38da3e4 2024-11-20T22:23:49,419 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/30456054936c45679f31180aa87f3f99 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/30456054936c45679f31180aa87f3f99 2024-11-20T22:23:49,421 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/46a2839fcd0241bfb11b23b3d0269d86 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/46a2839fcd0241bfb11b23b3d0269d86 2024-11-20T22:23:49,423 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/8dc68ccc761548588ccb416e288f6495 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/8dc68ccc761548588ccb416e288f6495 2024-11-20T22:23:49,425 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/920049fe5965486dac765ce6ba9a6a1b to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/920049fe5965486dac765ce6ba9a6a1b 2024-11-20T22:23:49,428 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/3a1d5680113444eab100d9cbb4f1be17 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/3a1d5680113444eab100d9cbb4f1be17 2024-11-20T22:23:49,430 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/be333753cb3b484b8b97747bb4d9ad88 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/be333753cb3b484b8b97747bb4d9ad88 2024-11-20T22:23:49,431 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/bb0e115ac3bf4e5aa60a97321ed7f191 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/bb0e115ac3bf4e5aa60a97321ed7f191 2024-11-20T22:23:49,433 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/e3a5324926754aba847a3e7caa293a39 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/e3a5324926754aba847a3e7caa293a39 2024-11-20T22:23:49,435 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/38bc3d99326c4adeaa2f6510f68dfab0 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/38bc3d99326c4adeaa2f6510f68dfab0 2024-11-20T22:23:49,437 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/fa890bd7c4874e04a6ada1c7d40716e7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/fa890bd7c4874e04a6ada1c7d40716e7 2024-11-20T22:23:49,439 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/c214153209714a27b07a2c0da3b9615d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/c214153209714a27b07a2c0da3b9615d 2024-11-20T22:23:49,442 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/2e164f9cda8d40728560ffbcb5f3a4a5 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/2e164f9cda8d40728560ffbcb5f3a4a5 2024-11-20T22:23:49,444 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/1e52e2fd37b74d8080e791c00b2c1d0f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/1e52e2fd37b74d8080e791c00b2c1d0f 2024-11-20T22:23:49,448 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/fa58ea8b2f37475db3e46fdf16768609 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/fa58ea8b2f37475db3e46fdf16768609 2024-11-20T22:23:49,451 DEBUG [StoreCloser-TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/0c252c96ae304a0f8db3fdff3afee35b to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/0c252c96ae304a0f8db3fdff3afee35b 2024-11-20T22:23:49,484 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/recovered.edits/485.seqid, newMaxSeqId=485, maxSeqId=1 2024-11-20T22:23:49,490 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb. 2024-11-20T22:23:49,490 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1635): Region close journal for 36a256a4871d36dc6632cf0cdb971cbb: 2024-11-20T22:23:49,493 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(170): Closed 36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:49,494 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=36a256a4871d36dc6632cf0cdb971cbb, regionState=CLOSED 2024-11-20T22:23:49,502 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-11-20T22:23:49,503 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; CloseRegionProcedure 36a256a4871d36dc6632cf0cdb971cbb, server=6365a1e51efd,44631,1732141399950 in 2.3540 sec 2024-11-20T22:23:49,504 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-11-20T22:23:49,504 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=36a256a4871d36dc6632cf0cdb971cbb, UNASSIGN in 2.3630 sec 2024-11-20T22:23:49,508 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-11-20T22:23:49,508 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.3710 sec 2024-11-20T22:23:49,510 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141429509"}]},"ts":"1732141429509"} 2024-11-20T22:23:49,512 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T22:23:49,524 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T22:23:49,529 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.4160 sec 2024-11-20T22:23:49,612 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T22:23:51,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T22:23:51,228 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: 
DISABLE, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-11-20T22:23:51,233 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T22:23:51,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:23:51,240 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=38, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:23:51,242 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=38, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:23:51,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-20T22:23:51,247 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:51,260 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A, FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B, FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C, FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/recovered.edits] 2024-11-20T22:23:51,269 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/46cb1ce697384a3a97b6b67849380ad9 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/46cb1ce697384a3a97b6b67849380ad9 2024-11-20T22:23:51,274 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/5bdf3ecb849641869cdd9f9f391b90d9 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/5bdf3ecb849641869cdd9f9f391b90d9 2024-11-20T22:23:51,276 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/c7e36281ffd140cf9175bf39c815c5f2 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/c7e36281ffd140cf9175bf39c815c5f2 2024-11-20T22:23:51,282 
DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/f15104687abf40e0b1721f48dcdc1a46 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/A/f15104687abf40e0b1721f48dcdc1a46 2024-11-20T22:23:51,292 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/1b9b6ebe354e4031bc0d2a18ed8784ca to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/1b9b6ebe354e4031bc0d2a18ed8784ca 2024-11-20T22:23:51,297 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/6dd69b1a9ef6400cbb42888fcc32478c to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/6dd69b1a9ef6400cbb42888fcc32478c 2024-11-20T22:23:51,300 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/cbcdb1cbd3db43d1b36c570a79685230 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/cbcdb1cbd3db43d1b36c570a79685230 2024-11-20T22:23:51,305 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/f0398eafb0134d75986dc2295432cd23 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/B/f0398eafb0134d75986dc2295432cd23 2024-11-20T22:23:51,310 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/139387d95a5d4417aab00a32487c8e6c to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/139387d95a5d4417aab00a32487c8e6c 2024-11-20T22:23:51,313 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/230f2cc82fdd4c9a812bd1a6a8b055d1 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/230f2cc82fdd4c9a812bd1a6a8b055d1 2024-11-20T22:23:51,317 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/b4e014f1ea8c45e6a1afd3b78e42c2be to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/b4e014f1ea8c45e6a1afd3b78e42c2be 2024-11-20T22:23:51,324 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/b56cf9cf171b4fcd85a97964eed0aaca to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/C/b56cf9cf171b4fcd85a97964eed0aaca 2024-11-20T22:23:51,330 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/recovered.edits/485.seqid to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb/recovered.edits/485.seqid 2024-11-20T22:23:51,331 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/36a256a4871d36dc6632cf0cdb971cbb 2024-11-20T22:23:51,332 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T22:23:51,340 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=38, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:23:51,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-20T22:23:51,359 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-20T22:23:51,371 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T22:23:51,450 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T22:23:51,452 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=38, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:23:51,452 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
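The entries above record the server side of a table drop: DisableTableProcedure (pid=34) finishes, the client sees the DISABLE operation complete, and DeleteTableProcedure (pid=38) has HFileArchiver move the region's A/B/C store files and recovered.edits into the archive/ tree before scrubbing the table from hbase:meta. For orientation, a minimal client-side sketch of the same disable-then-delete sequence via the standard HBase Admin API follows; the configuration values (e.g. the ZooKeeper quorum) are assumptions and are not taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
    public static void main(String[] args) throws Exception {
        // Client-side configuration; the quorum value is an assumption, not read from the log.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "localhost");

        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // A table must be disabled before it can be deleted; these two calls
            // correspond to the DisableTableProcedure and DeleteTableProcedure
            // entries recorded above.
            if (admin.tableExists(table)) {
                if (admin.isTableEnabled(table)) {
                    admin.disableTable(table);
                }
                admin.deleteTable(table);
            }
        }
    }
}

The actual archiving of store files under archive/data/default/TestAcidGuarantees is done server-side by the master and region server procedures; the client only drives the disable and delete requests shown here.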
2024-11-20T22:23:51,453 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732141431452"}]},"ts":"9223372036854775807"} 2024-11-20T22:23:51,467 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T22:23:51,467 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 36a256a4871d36dc6632cf0cdb971cbb, NAME => 'TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T22:23:51,467 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T22:23:51,467 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732141431467"}]},"ts":"9223372036854775807"} 2024-11-20T22:23:51,477 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T22:23:51,529 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=38, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:23:51,532 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 295 msec 2024-11-20T22:23:51,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-20T22:23:51,552 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 38 completed 2024-11-20T22:23:51,586 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=240 (was 219) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: hconnection-0x42e0417a-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x42e0417a-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1764968301_22 at /127.0.0.1:47398 [Waiting for operation #254] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x42e0417a-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_667732343_22 at /127.0.0.1:45962 [Waiting for operation #125] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1764968301_22 at /127.0.0.1:47584 [Waiting for operation #242] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;6365a1e51efd:44631-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x42e0417a-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=458 (was 444) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=814 (was 458) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2362 (was 2648) 2024-11-20T22:23:51,604 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=240, OpenFileDescriptor=458, MaxFileDescriptor=1048576, SystemLoadAverage=814, ProcessCount=11, AvailableMemoryMB=2361 2024-11-20T22:23:51,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T22:23:51,608 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T22:23:51,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=39, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T22:23:51,622 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T22:23:51,623 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 39 2024-11-20T22:23:51,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-20T22:23:51,624 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:51,626 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T22:23:51,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741938_1114 (size=960) 2024-11-20T22:23:51,673 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 
328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a 2024-11-20T22:23:51,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-20T22:23:51,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741939_1115 (size=53) 2024-11-20T22:23:51,735 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:23:51,735 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 328fe2cc950802b391cb9dd7043a44b7, disabling compactions & flushes 2024-11-20T22:23:51,735 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:51,735 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:51,735 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. after waiting 0 ms 2024-11-20T22:23:51,736 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:51,736 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
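The create request logged above carries a full table descriptor: the table attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC', three column families A, B and C (one version, ROW bloom filter, 64 KB blocks, block cache on), and the small memstore flush size that TableDescriptorChecker warned about (131072). As a hedged sketch, the same descriptor could be built with the HBase client builder API roughly as below; connection settings are an assumption, and only the attributes visible in the logged descriptor are set explicitly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAcidTable {
    // One column family with the attributes shown in the logged descriptor:
    // one version, ROW bloom filter, 64 KB blocks, block cache enabled.
    private static ColumnFamilyDescriptor family(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(64 * 1024)
                .setBlockCacheEnabled(true)
                .build();
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // connection details are an assumption
        TableName table = TableName.valueOf("TestAcidGuarantees");

        TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
                // Table attribute from the logged descriptor: BASIC in-memory compaction
                // for the table's memstores (CompactingMemStore in the region-open entries).
                .setValue("hbase.hregion.compacting.memstore.type", "BASIC")
                // The 128 KB flush size the TableDescriptorChecker warning refers to.
                .setMemStoreFlushSize(128 * 1024)
                .setColumnFamily(family("A"))
                .setColumnFamily(family("B"))
                .setColumnFamily(family("C"))
                .build();

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            admin.createTable(desc); // drives the CreateTableProcedure (pid=39) seen above
        }
    }
}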
2024-11-20T22:23:51,736 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:23:51,737 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T22:23:51,737 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732141431737"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732141431737"}]},"ts":"1732141431737"} 2024-11-20T22:23:51,739 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T22:23:51,742 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T22:23:51,742 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141431742"}]},"ts":"1732141431742"} 2024-11-20T22:23:51,745 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T22:23:51,766 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=328fe2cc950802b391cb9dd7043a44b7, ASSIGN}] 2024-11-20T22:23:51,769 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=328fe2cc950802b391cb9dd7043a44b7, ASSIGN 2024-11-20T22:23:51,770 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=328fe2cc950802b391cb9dd7043a44b7, ASSIGN; state=OFFLINE, location=6365a1e51efd,44631,1732141399950; forceNewPlan=false, retain=false 2024-11-20T22:23:51,920 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=328fe2cc950802b391cb9dd7043a44b7, regionState=OPENING, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:51,923 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE; OpenRegionProcedure 328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950}] 2024-11-20T22:23:51,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-20T22:23:52,075 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:52,106 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
2024-11-20T22:23:52,107 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7285): Opening region: {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:23:52,107 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:52,107 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:23:52,107 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7327): checking encryption for 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:52,108 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7330): checking classloading for 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:52,137 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:52,140 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:23:52,140 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 328fe2cc950802b391cb9dd7043a44b7 columnFamilyName A 2024-11-20T22:23:52,140 DEBUG [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:52,147 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] regionserver.HStore(327): Store=328fe2cc950802b391cb9dd7043a44b7/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:23:52,147 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:52,152 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:23:52,153 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 328fe2cc950802b391cb9dd7043a44b7 columnFamilyName B 2024-11-20T22:23:52,153 DEBUG [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:52,154 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] regionserver.HStore(327): Store=328fe2cc950802b391cb9dd7043a44b7/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:23:52,154 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:52,157 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:23:52,157 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 328fe2cc950802b391cb9dd7043a44b7 columnFamilyName C 2024-11-20T22:23:52,157 DEBUG [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:52,158 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] regionserver.HStore(327): Store=328fe2cc950802b391cb9dd7043a44b7/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:23:52,159 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:52,160 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:52,161 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:52,163 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T22:23:52,166 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1085): writing seq id for 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:52,179 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T22:23:52,182 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1102): Opened 328fe2cc950802b391cb9dd7043a44b7; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69058855, jitterRate=0.029057130217552185}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T22:23:52,190 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1001): Region open journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:23:52,191 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., pid=41, masterSystemTime=1732141432075 2024-11-20T22:23:52,195 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:52,195 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
2024-11-20T22:23:52,197 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=328fe2cc950802b391cb9dd7043a44b7, regionState=OPEN, openSeqNum=2, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:52,215 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-11-20T22:23:52,215 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; OpenRegionProcedure 328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 in 279 msec 2024-11-20T22:23:52,217 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-11-20T22:23:52,217 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=328fe2cc950802b391cb9dd7043a44b7, ASSIGN in 449 msec 2024-11-20T22:23:52,225 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T22:23:52,225 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141432225"}]},"ts":"1732141432225"} 2024-11-20T22:23:52,227 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T22:23:52,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-20T22:23:52,249 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T22:23:52,253 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 641 msec 2024-11-20T22:23:52,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-20T22:23:52,745 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 39 completed 2024-11-20T22:23:52,748 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3242ee55 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7d3b0c59 2024-11-20T22:23:52,792 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a1a4a6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:52,795 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:52,805 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38136, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:52,831 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=MasterService, sasl=false 2024-11-20T22:23:52,855 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33432, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T22:23:52,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T22:23:52,867 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T22:23:52,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=42, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T22:23:52,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741940_1116 (size=996) 2024-11-20T22:23:52,940 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 
2024-11-20T22:23:52,940 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated tableinfo=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-20T22:23:52,945 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T22:23:52,973 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=328fe2cc950802b391cb9dd7043a44b7, REOPEN/MOVE}] 2024-11-20T22:23:52,975 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=328fe2cc950802b391cb9dd7043a44b7, REOPEN/MOVE 2024-11-20T22:23:52,976 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=328fe2cc950802b391cb9dd7043a44b7, regionState=CLOSING, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:52,979 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T22:23:52,979 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE; CloseRegionProcedure 328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950}] 2024-11-20T22:23:53,131 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:53,133 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(124): Close 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:53,133 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T22:23:53,133 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1681): Closing 328fe2cc950802b391cb9dd7043a44b7, disabling compactions & flushes 2024-11-20T22:23:53,133 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:53,133 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:53,134 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. after waiting 0 ms 2024-11-20T22:23:53,134 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
2024-11-20T22:23:53,146 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T22:23:53,147 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:53,147 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1635): Region close journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:23:53,147 WARN [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegionServer(3786): Not adding moved region record: 328fe2cc950802b391cb9dd7043a44b7 to self. 2024-11-20T22:23:53,156 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(170): Closed 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:53,160 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=328fe2cc950802b391cb9dd7043a44b7, regionState=CLOSED 2024-11-20T22:23:53,175 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=44 2024-11-20T22:23:53,175 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=44, state=SUCCESS; CloseRegionProcedure 328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 in 182 msec 2024-11-20T22:23:53,179 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=328fe2cc950802b391cb9dd7043a44b7, REOPEN/MOVE; state=CLOSED, location=6365a1e51efd,44631,1732141399950; forceNewPlan=false, retain=true 2024-11-20T22:23:53,330 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=328fe2cc950802b391cb9dd7043a44b7, regionState=OPENING, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:53,336 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=44, state=RUNNABLE; OpenRegionProcedure 328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950}] 2024-11-20T22:23:53,488 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:53,494 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
2024-11-20T22:23:53,495 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7285): Opening region: {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:23:53,495 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:53,495 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:23:53,495 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7327): checking encryption for 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:53,495 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7330): checking classloading for 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:53,510 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:53,512 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:23:53,519 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 328fe2cc950802b391cb9dd7043a44b7 columnFamilyName A 2024-11-20T22:23:53,523 DEBUG [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:53,524 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] regionserver.HStore(327): Store=328fe2cc950802b391cb9dd7043a44b7/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:23:53,528 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:53,530 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:23:53,530 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 328fe2cc950802b391cb9dd7043a44b7 columnFamilyName B 2024-11-20T22:23:53,530 DEBUG [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:53,531 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] regionserver.HStore(327): Store=328fe2cc950802b391cb9dd7043a44b7/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:23:53,531 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:53,532 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:23:53,532 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 328fe2cc950802b391cb9dd7043a44b7 columnFamilyName C 2024-11-20T22:23:53,532 DEBUG [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:53,534 INFO [StoreOpener-328fe2cc950802b391cb9dd7043a44b7-1 {}] regionserver.HStore(327): Store=328fe2cc950802b391cb9dd7043a44b7/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:23:53,535 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:53,536 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:53,537 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:53,540 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T22:23:53,545 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1085): writing seq id for 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:53,551 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1102): Opened 328fe2cc950802b391cb9dd7043a44b7; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73642891, jitterRate=0.09736458957195282}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T22:23:53,554 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1001): Region open journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:23:53,556 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., pid=46, masterSystemTime=1732141433488 2024-11-20T22:23:53,559 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:53,559 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
2024-11-20T22:23:53,560 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=328fe2cc950802b391cb9dd7043a44b7, regionState=OPEN, openSeqNum=5, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:53,567 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=44 2024-11-20T22:23:53,568 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=44, state=SUCCESS; OpenRegionProcedure 328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 in 228 msec 2024-11-20T22:23:53,577 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-11-20T22:23:53,577 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=328fe2cc950802b391cb9dd7043a44b7, REOPEN/MOVE in 595 msec 2024-11-20T22:23:53,584 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-11-20T22:23:53,584 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 635 msec 2024-11-20T22:23:53,591 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 716 msec 2024-11-20T22:23:53,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=42 2024-11-20T22:23:53,602 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2aa409d0 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@515fd839 2024-11-20T22:23:53,668 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d006bed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:53,671 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53af6163 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@748ab582 2024-11-20T22:23:53,721 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f4859f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:53,724 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x15736fcc to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@35b51e5d 2024-11-20T22:23:53,775 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1eb823f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:53,778 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x32168855 to 
127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@74be9bc0 2024-11-20T22:23:53,822 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a0312cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:53,824 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x40832d66 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@776c0cb7 2024-11-20T22:23:53,849 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@555bfdff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:53,851 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3ec46f90 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@347ad9b2 2024-11-20T22:23:53,888 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d5e0e3f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:53,890 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7f63b68c to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1d36579b 2024-11-20T22:23:53,907 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70f48df4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:53,909 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x473f181f to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@681a05ec 2024-11-20T22:23:53,941 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cd5be36, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:53,944 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x768577a2 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4e0829fb 2024-11-20T22:23:53,976 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fbd1a02, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:54,020 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:23:54,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-11-20T22:23:54,023 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:23:54,024 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:23:54,024 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:23:54,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T22:23:54,039 DEBUG [hconnection-0x1ef77b3b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:54,040 DEBUG [hconnection-0x13a49384-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:54,044 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38152, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:54,085 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 328fe2cc950802b391cb9dd7043a44b7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:23:54,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=A 2024-11-20T22:23:54,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:54,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=B 2024-11-20T22:23:54,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:54,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=C 2024-11-20T22:23:54,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:54,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:54,097 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38160, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:54,103 DEBUG [hconnection-0x705d529f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:54,105 INFO [RS-EventLoopGroup-3-2 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38164, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:54,117 DEBUG [hconnection-0x17d388d9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:54,120 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38180, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:54,123 DEBUG [hconnection-0x13381b1e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:54,125 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38184, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:54,132 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141494133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141494130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141494135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,137 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141494137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T22:23:54,167 DEBUG [hconnection-0x67328c8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:54,168 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38186, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:54,178 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,178 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:23:54,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:54,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:23:54,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
2024-11-20T22:23:54,179 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:54,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:54,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:54,181 DEBUG [hconnection-0x34c2a382-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:54,182 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38194, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:54,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141494187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,226 DEBUG [hconnection-0x7ff3aacc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:54,228 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38198, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:54,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141494237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141494239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141494241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141494242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,260 DEBUG [hconnection-0x5b5182bd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:54,261 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38202, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:54,277 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112025e07fbe50e14b09b792eb6967209265_328fe2cc950802b391cb9dd7043a44b7 is 50, key is test_row_0/A:col10/1732141434074/Put/seqid=0 2024-11-20T22:23:54,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141494291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741941_1117 (size=12154) 2024-11-20T22:23:54,335 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,336 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:23:54,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:54,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:23:54,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:54,336 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
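pid=48 above is a master-dispatched flush: each FlushRegionCallable attempt is declined with "NOT flushing ... as already flushing" because MemStoreFlusher.0 is still writing the same region out, the callable reports "Unable to complete flush", and the master re-dispatches it until the in-progress flush finishes. For reference, a hedged sketch of how a flush is requested through the public Admin API (illustration only; how the request is routed internally is whatever this 2.7.0-SNAPSHOT build does, as recorded above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the cluster to flush every region of TestAcidGuarantees. If a region is
      // already flushing, the server-side work is retried until the current flush
      // completes -- the repeating pid=48 pattern recorded in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}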
2024-11-20T22:23:54,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:54,337 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:54,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T22:23:54,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:54,348 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112025e07fbe50e14b09b792eb6967209265_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112025e07fbe50e14b09b792eb6967209265_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:54,350 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/8b86505a3ed342979bc81cd786172a1e, store: [table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:23:54,366 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/8b86505a3ed342979bc81cd786172a1e is 175, key is test_row_0/A:col10/1732141434074/Put/seqid=0 2024-11-20T22:23:54,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741942_1118 (size=30955) 2024-11-20T22:23:54,417 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/8b86505a3ed342979bc81cd786172a1e 2024-11-20T22:23:54,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141494442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141494444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141494445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141494446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,502 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/08b43a97f99241baba323920fc484273 is 50, key is test_row_0/B:col10/1732141434074/Put/seqid=0 2024-11-20T22:23:54,505 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,506 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:23:54,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:54,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:23:54,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:54,506 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:54,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:54,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:54,527 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141494524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741943_1119 (size=12001) 2024-11-20T22:23:54,539 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/08b43a97f99241baba323920fc484273 2024-11-20T22:23:54,597 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/4f7bf5807c974acc8984d86935e5fd53 is 50, key is test_row_0/C:col10/1732141434074/Put/seqid=0 2024-11-20T22:23:54,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 
{}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T22:23:54,659 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,660 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:23:54,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:54,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:23:54,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:54,660 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:54,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:54,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:54,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741944_1120 (size=12001) 2024-11-20T22:23:54,758 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141494752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141494770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,794 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141494791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,796 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141494796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,813 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,814 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:23:54,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:54,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:23:54,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:54,815 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
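The "Over memstore limit=512.0 K" figure is the per-region blocking threshold: updates are rejected once the memstore exceeds the configured flush size times the block multiplier, and accepted again after the flush brings it back down. A hedged configuration sketch showing the two knobs involved (the 128 KB flush size and multiplier of 4 below are assumed, test-sized values chosen to yield a 512 KB limit; they are not read from this test's setup):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConf {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 KB (assumed test-sized value;
    // the production default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block new updates once the memstore grows past flush.size * multiplier,
    // i.e. 4 * 128 KB = 512 KB -- the limit reported in the RegionTooBusyException above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}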
2024-11-20T22:23:54,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:54,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:54,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141494830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,968 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:54,969 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:23:54,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:54,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:23:54,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
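Editor's note on the repeated "RegionTooBusyException: Over memstore limit=512.0 K" warnings above: they come from HRegion.checkResources, which rejects writes while a region's memstore sits above its blocking threshold. That threshold is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier, so the 512 K figure suggests a deliberately small flush size in this test's configuration. A minimal sketch of how such a limit could be configured; the concrete values are assumptions for illustration, not read from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Blocking limit = flush size * block multiplier.
    // 128 KB * 4 = 512 KB, which matches "Over memstore limit=512.0 K" in the log;
    // the values below are illustrative assumptions only.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("memstore blocking limit (bytes): " + blockingLimit);
  }
}

Writes blocked this way are accepted again once the in-progress flush, visible later in this section, brings the memstore back under the limit.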
2024-11-20T22:23:54,970 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:54,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:54,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:55,071 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/4f7bf5807c974acc8984d86935e5fd53 2024-11-20T22:23:55,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/8b86505a3ed342979bc81cd786172a1e as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/8b86505a3ed342979bc81cd786172a1e 2024-11-20T22:23:55,107 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/8b86505a3ed342979bc81cd786172a1e, entries=150, sequenceid=15, filesize=30.2 K 2024-11-20T22:23:55,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/08b43a97f99241baba323920fc484273 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/08b43a97f99241baba323920fc484273 2024-11-20T22:23:55,120 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/08b43a97f99241baba323920fc484273, entries=150, sequenceid=15, 
filesize=11.7 K 2024-11-20T22:23:55,122 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/4f7bf5807c974acc8984d86935e5fd53 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/4f7bf5807c974acc8984d86935e5fd53 2024-11-20T22:23:55,122 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:55,123 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:23:55,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:55,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:23:55,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:55,123 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:55,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:55,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
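On the client side these rejections surface as the "callId ... exception=org.apache.hadoop.hbase.RegionTooBusyException" DEBUG lines. The stock HBase client already retries RegionTooBusyException internally, so explicit handling is rarely needed; the following is only a hedged sketch of application-level backoff, assuming the exception reaches the caller unwrapped (depending on client version it may instead arrive inside a retries-exhausted exception). Table, row, and column names are placeholders taken loosely from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutRetry {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);   // rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          // Back off and retry; the server unblocks once the flush catches up.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}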
2024-11-20T22:23:55,136 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T22:23:55,137 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/4f7bf5807c974acc8984d86935e5fd53, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T22:23:55,138 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 328fe2cc950802b391cb9dd7043a44b7 in 1053ms, sequenceid=15, compaction requested=false 2024-11-20T22:23:55,138 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T22:23:55,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:23:55,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T22:23:55,269 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 328fe2cc950802b391cb9dd7043a44b7 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T22:23:55,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=A 2024-11-20T22:23:55,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:55,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=B 2024-11-20T22:23:55,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:55,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=C 2024-11-20T22:23:55,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:55,276 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:55,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:23:55,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:55,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:23:55,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
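The recurring cycle of "Executing remote procedure ... FlushRegionCallable, pid=48", "NOT flushing ... as already flushing", and "Remote procedure failed, pid=48" is a master-driven flush procedure colliding with the flush the region server's MemStoreFlusher already has in flight (the "Flushing 328fe2cc950802b391cb9dd7043a44b7 3/3 column families" entry above): each dispatched callable fails with "Unable to complete flush" and the master re-dispatches it until the in-flight flush completes. Such a procedure can be started from client code through the Admin API; a minimal sketch, with the table name taken from the log and everything else assumed:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ManualFlush {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to run a flush procedure for every region of the table.
      // If a region is already flushing, the per-region callable is rejected and
      // the procedure is retried, as in the pid=48 entries in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}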
2024-11-20T22:23:55,277 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:55,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:55,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:55,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:55,309 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207b32a557721744139983f580681e81c2_328fe2cc950802b391cb9dd7043a44b7 is 50, key is test_row_0/A:col10/1732141434130/Put/seqid=0 2024-11-20T22:23:55,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141495321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:55,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141495324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:55,339 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141495326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:55,341 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141495337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:55,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741945_1121 (size=14594) 2024-11-20T22:23:55,354 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:55,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141495339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:55,362 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207b32a557721744139983f580681e81c2_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207b32a557721744139983f580681e81c2_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:55,365 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/a9d2f0ec98b04394ab5cb077a918c92a, store: [table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:23:55,365 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/a9d2f0ec98b04394ab5cb077a918c92a is 175, key is test_row_0/A:col10/1732141434130/Put/seqid=0 2024-11-20T22:23:55,429 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:55,430 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:23:55,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:55,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:23:55,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
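The mobdir/.tmp paths, the "HMobStore(268): FLUSH Renaming flushed file" entry, and the DefaultMobStoreFlusher lines in this part of the log show that column family A is flushed through the MOB (medium object) path: oversized cell values land in files under /mobdir while the regular store file keeps only reference cells. MOB is enabled per column family at table-creation time; a hedged sketch of such a descriptor, where the 100-byte threshold is an illustrative assumption and not taken from this test:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Family "A" is MOB-enabled: values above the threshold are written to MOB
      // files under /mobdir instead of the regular store files; B and C are plain.
      admin.createTable(
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                  .setMobEnabled(true)
                  .setMobThreshold(100)   // assumed threshold, for illustration only
                  .build())
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
              .build());
    }
  }
}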
2024-11-20T22:23:55,431 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:55,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:55,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741946_1122 (size=39549) 2024-11-20T22:23:55,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:55,434 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/a9d2f0ec98b04394ab5cb077a918c92a 2024-11-20T22:23:55,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141495438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:55,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141495441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:55,450 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141495443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:55,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141495457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:55,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/ae49ea93baba4b248688f960b642b947 is 50, key is test_row_0/B:col10/1732141434130/Put/seqid=0 2024-11-20T22:23:55,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741947_1123 (size=12001) 2024-11-20T22:23:55,584 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:55,584 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:23:55,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:55,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:23:55,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:55,585 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:55,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:55,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:55,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141495641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:55,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141495653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:55,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141495660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:55,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141495661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:55,737 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:55,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:23:55,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:55,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:23:55,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:55,738 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:55,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:55,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:55,896 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:55,897 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:23:55,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:55,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:23:55,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:55,897 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:55,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:55,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:23:55,911 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/ae49ea93baba4b248688f960b642b947 2024-11-20T22:23:55,953 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/804a240d580248589fc9453f005b879f is 50, key is test_row_0/C:col10/1732141434130/Put/seqid=0 2024-11-20T22:23:55,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,962 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141495961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:55,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141495952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:55,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141495966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:55,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141495970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:56,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741948_1124 (size=12001) 2024-11-20T22:23:56,051 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:56,051 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:23:56,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:56,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:23:56,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:56,052 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T22:23:56,208 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:56,209 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:23:56,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:56,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:23:56,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:56,209 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,363 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:56,364 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:23:56,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:56,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:23:56,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:56,364 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:56,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141496364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:56,425 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/804a240d580248589fc9453f005b879f 2024-11-20T22:23:56,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/a9d2f0ec98b04394ab5cb077a918c92a as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/a9d2f0ec98b04394ab5cb077a918c92a 2024-11-20T22:23:56,442 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/a9d2f0ec98b04394ab5cb077a918c92a, entries=200, sequenceid=42, filesize=38.6 K 2024-11-20T22:23:56,445 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/ae49ea93baba4b248688f960b642b947 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/ae49ea93baba4b248688f960b642b947 2024-11-20T22:23:56,465 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/ae49ea93baba4b248688f960b642b947, entries=150, sequenceid=42, filesize=11.7 K 2024-11-20T22:23:56,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/804a240d580248589fc9453f005b879f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/804a240d580248589fc9453f005b879f 2024-11-20T22:23:56,481 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/804a240d580248589fc9453f005b879f, entries=150, sequenceid=42, filesize=11.7 K 2024-11-20T22:23:56,489 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 328fe2cc950802b391cb9dd7043a44b7 in 1219ms, sequenceid=42, compaction requested=false 2024-11-20T22:23:56,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:23:56,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:56,495 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 328fe2cc950802b391cb9dd7043a44b7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:23:56,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=A 2024-11-20T22:23:56,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:56,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=B 2024-11-20T22:23:56,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:56,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=C 2024-11-20T22:23:56,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:56,518 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:56,518 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:23:56,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:56,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:23:56,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:56,519 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,544 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112046dcb017240a4d0ea490094e7520854f_328fe2cc950802b391cb9dd7043a44b7 is 50, key is test_row_0/A:col10/1732141435323/Put/seqid=0 2024-11-20T22:23:56,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741949_1125 (size=17034) 2024-11-20T22:23:56,595 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:56,602 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112046dcb017240a4d0ea490094e7520854f_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112046dcb017240a4d0ea490094e7520854f_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:56,603 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/82301e7580f943b0a372da152341d34b, store: [table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:23:56,604 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/82301e7580f943b0a372da152341d34b is 175, key is test_row_0/A:col10/1732141435323/Put/seqid=0 2024-11-20T22:23:56,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741950_1126 (size=48139) 2024-11-20T22:23:56,654 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/82301e7580f943b0a372da152341d34b 2024-11-20T22:23:56,672 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:56,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:23:56,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:56,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:23:56,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:56,676 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
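The records above show client mutations being rejected with RegionTooBusyException ("Over memstore limit=512.0 K") while MemStoreFlusher.0 is still flushing region 328fe2cc950802b391cb9dd7043a44b7, and the master's remote flush procedure (pid=48) failing for the same reason. The following is a minimal, hypothetical client-side sketch of a write against this table with an explicit back-off on that exception; the table, row, family and qualifier names are taken from the log, while the retry policy is an assumption (the HBase client normally retries such errors internally).

    // Hypothetical sketch: a put against the test table with a manual back-off on
    // RegionTooBusyException. In practice the HBase client retries internally and the
    // exception may surface wrapped in a retries-exhausted error; this loop only
    // illustrates the failure mode logged above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TooBusyRetryExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                for (int attempt = 1; attempt <= 3; attempt++) {
                    try {
                        table.put(put);   // rejected while the region's memstore is over its blocking limit
                        break;
                    } catch (RegionTooBusyException e) {
                        Thread.sleep(200L * attempt);   // assumed back-off; wait for the flush to drain the memstore
                    }
                }
            }
        }
    }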
2024-11-20T22:23:56,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:56,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141496665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:56,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:56,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141496675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:56,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:56,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141496676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:56,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:56,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141496678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:56,683 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/c06311a76c7547a6a518c6f23679f144 is 50, key is test_row_0/B:col10/1732141435323/Put/seqid=0 2024-11-20T22:23:56,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741951_1127 (size=12001) 2024-11-20T22:23:56,771 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/c06311a76c7547a6a518c6f23679f144 2024-11-20T22:23:56,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:56,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141496781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:56,796 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:56,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141496782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:56,796 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:56,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141496784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:56,800 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/7dd2dd43aa7d4e2fa98248d925dd3171 is 50, key is test_row_0/C:col10/1732141435323/Put/seqid=0 2024-11-20T22:23:56,797 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:56,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141496784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:56,818 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T22:23:56,820 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33444, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T22:23:56,829 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:56,830 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:23:56,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:56,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:23:56,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:56,831 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741952_1128 (size=12001) 2024-11-20T22:23:56,835 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/7dd2dd43aa7d4e2fa98248d925dd3171 2024-11-20T22:23:56,842 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/82301e7580f943b0a372da152341d34b as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/82301e7580f943b0a372da152341d34b 2024-11-20T22:23:56,852 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/82301e7580f943b0a372da152341d34b, entries=250, sequenceid=53, filesize=47.0 K 2024-11-20T22:23:56,854 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/c06311a76c7547a6a518c6f23679f144 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/c06311a76c7547a6a518c6f23679f144 2024-11-20T22:23:56,865 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/c06311a76c7547a6a518c6f23679f144, entries=150, sequenceid=53, filesize=11.7 K 2024-11-20T22:23:56,866 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/7dd2dd43aa7d4e2fa98248d925dd3171 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/7dd2dd43aa7d4e2fa98248d925dd3171 2024-11-20T22:23:56,875 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/7dd2dd43aa7d4e2fa98248d925dd3171, entries=150, sequenceid=53, filesize=11.7 K 2024-11-20T22:23:56,876 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 328fe2cc950802b391cb9dd7043a44b7 in 381ms, sequenceid=53, compaction requested=true 2024-11-20T22:23:56,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:23:56,877 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:23:56,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 328fe2cc950802b391cb9dd7043a44b7:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:23:56,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:56,877 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:23:56,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 328fe2cc950802b391cb9dd7043a44b7:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:23:56,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:56,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 328fe2cc950802b391cb9dd7043a44b7:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:23:56,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:56,879 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 118643 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:23:56,879 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:23:56,879 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 328fe2cc950802b391cb9dd7043a44b7/A is initiating minor compaction (all files) 2024-11-20T22:23:56,879 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 328fe2cc950802b391cb9dd7043a44b7/B is initiating minor compaction (all files) 2024-11-20T22:23:56,879 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 328fe2cc950802b391cb9dd7043a44b7/A in TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
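The compaction records above show the server selecting a minor compaction once three store files become eligible for stores A and B (SortedCompactionPolicy / ExploringCompactionPolicy, "3 eligible, 16 blocking"). Below is a hypothetical sketch of requesting the equivalent compaction of family A through the Admin API; the counts behind "3 eligible, 16 blocking" come from server-side settings (commonly hbase.hstore.compaction.min and hbase.hstore.blockingStoreFiles in hbase-site.xml), which are only named here for orientation and are not changed by this code.

    // Hypothetical sketch: explicitly asking the region server to compact store A of the
    // test table, analogous to the system-selected minor compaction of 3 files logged above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompactionExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Queue a compaction of column family A; the server decides which store
                // files to merge, as in the selection logged above.
                admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("A"));
            }
        }
    }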
2024-11-20T22:23:56,879 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 328fe2cc950802b391cb9dd7043a44b7/B in TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:56,880 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/8b86505a3ed342979bc81cd786172a1e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/a9d2f0ec98b04394ab5cb077a918c92a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/82301e7580f943b0a372da152341d34b] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp, totalSize=115.9 K 2024-11-20T22:23:56,880 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/08b43a97f99241baba323920fc484273, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/ae49ea93baba4b248688f960b642b947, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/c06311a76c7547a6a518c6f23679f144] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp, totalSize=35.2 K 2024-11-20T22:23:56,880 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:56,880 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/8b86505a3ed342979bc81cd786172a1e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/a9d2f0ec98b04394ab5cb077a918c92a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/82301e7580f943b0a372da152341d34b] 2024-11-20T22:23:56,880 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 08b43a97f99241baba323920fc484273, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732141434070 2024-11-20T22:23:56,881 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting ae49ea93baba4b248688f960b642b947, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732141434123 2024-11-20T22:23:56,881 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b86505a3ed342979bc81cd786172a1e, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732141434070 2024-11-20T22:23:56,883 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting c06311a76c7547a6a518c6f23679f144, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732141435323 2024-11-20T22:23:56,883 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting a9d2f0ec98b04394ab5cb077a918c92a, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732141434123 2024-11-20T22:23:56,885 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82301e7580f943b0a372da152341d34b, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732141435293 2024-11-20T22:23:56,920 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:23:56,926 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 328fe2cc950802b391cb9dd7043a44b7#B#compaction#108 average throughput is 0.50 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:56,935 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/ee4e2b5c702143578206564a404cded6 is 50, key is test_row_0/B:col10/1732141435323/Put/seqid=0 2024-11-20T22:23:56,955 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120587c56703d5e472183a6f49aa5e44eb4_328fe2cc950802b391cb9dd7043a44b7 store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:23:56,968 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120587c56703d5e472183a6f49aa5e44eb4_328fe2cc950802b391cb9dd7043a44b7, store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:23:56,969 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120587c56703d5e472183a6f49aa5e44eb4_328fe2cc950802b391cb9dd7043a44b7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:23:56,984 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:23:56,985 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:23:56,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
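The HMobStore and DefaultMobStoreCompactor records above (flushing into .../mobdir/... and aborting a MOB writer when no MOB cells are present) indicate that column family A is MOB-enabled. A hypothetical sketch of how such a family could be declared is shown below; the MOB threshold value and the single-family table are assumptions for illustration, not the descriptors this test actually uses.

    // Hypothetical sketch: creating a table whose family A stores large cells in MOB files,
    // which is what produces the mobdir flush/compaction activity in the log above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilyExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                admin.createTable(TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes("A"))
                        .setMobEnabled(true)      // cells above the threshold go to MOB files under mobdir/
                        .setMobThreshold(100L)    // assumed threshold in bytes
                        .build())
                    .build());
            }
        }
    }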
2024-11-20T22:23:56,985 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 328fe2cc950802b391cb9dd7043a44b7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:23:56,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=A 2024-11-20T22:23:56,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:56,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=B 2024-11-20T22:23:56,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:56,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=C 2024-11-20T22:23:56,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:57,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:57,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:23:57,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:57,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141497062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:57,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:57,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141497062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:57,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741954_1130 (size=4469)
2024-11-20T22:23:57,069 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:57,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141497068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:57,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741953_1129 (size=12104)
2024-11-20T22:23:57,095 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:57,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141497092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:57,104 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/ee4e2b5c702143578206564a404cded6 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/ee4e2b5c702143578206564a404cded6
2024-11-20T22:23:57,115 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 328fe2cc950802b391cb9dd7043a44b7/B of 328fe2cc950802b391cb9dd7043a44b7 into ee4e2b5c702143578206564a404cded6(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T22:23:57,115 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 328fe2cc950802b391cb9dd7043a44b7:
2024-11-20T22:23:57,115 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., storeName=328fe2cc950802b391cb9dd7043a44b7/B, priority=13, startTime=1732141436877; duration=0sec
2024-11-20T22:23:57,115 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-20T22:23:57,115 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 328fe2cc950802b391cb9dd7043a44b7:B
2024-11-20T22:23:57,115 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T22:23:57,117 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T22:23:57,117 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 328fe2cc950802b391cb9dd7043a44b7/C is initiating minor compaction (all files)
2024-11-20T22:23:57,117 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 328fe2cc950802b391cb9dd7043a44b7/C in TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.
2024-11-20T22:23:57,118 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/4f7bf5807c974acc8984d86935e5fd53, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/804a240d580248589fc9453f005b879f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/7dd2dd43aa7d4e2fa98248d925dd3171] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp, totalSize=35.2 K
2024-11-20T22:23:57,118 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f7bf5807c974acc8984d86935e5fd53, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732141434070
2024-11-20T22:23:57,119 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 804a240d580248589fc9453f005b879f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732141434123
2024-11-20T22:23:57,119 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 7dd2dd43aa7d4e2fa98248d925dd3171, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732141435323
2024-11-20T22:23:57,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f535fa2e8c0143b8a7d3188f3c48e652_328fe2cc950802b391cb9dd7043a44b7 is 50, key is test_row_0/A:col10/1732141436666/Put/seqid=0
2024-11-20T22:23:57,166 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:57,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141497165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:57,185 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 328fe2cc950802b391cb9dd7043a44b7#C#compaction#111 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-11-20T22:23:57,186 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/4e82da334502494ba04f340e037c697a is 50, key is test_row_0/C:col10/1732141435323/Put/seqid=0
2024-11-20T22:23:57,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741955_1131 (size=12154)
2024-11-20T22:23:57,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:23:57,227 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f535fa2e8c0143b8a7d3188f3c48e652_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f535fa2e8c0143b8a7d3188f3c48e652_328fe2cc950802b391cb9dd7043a44b7
2024-11-20T22:23:57,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/f8a69baae4d74b0cbe2334e2af76ccfc, store: [table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7]
2024-11-20T22:23:57,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/f8a69baae4d74b0cbe2334e2af76ccfc is 175, key is test_row_0/A:col10/1732141436666/Put/seqid=0
2024-11-20T22:23:57,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741956_1132 (size=12104)
2024-11-20T22:23:57,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741957_1133 (size=30955)
2024-11-20T22:23:57,338 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/4e82da334502494ba04f340e037c697a as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/4e82da334502494ba04f340e037c697a
2024-11-20T22:23:57,355 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 328fe2cc950802b391cb9dd7043a44b7/C of 328fe2cc950802b391cb9dd7043a44b7 into 4e82da334502494ba04f340e037c697a(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T22:23:57,355 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 328fe2cc950802b391cb9dd7043a44b7:
2024-11-20T22:23:57,355 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., storeName=328fe2cc950802b391cb9dd7043a44b7/C, priority=13, startTime=1732141436878; duration=0sec
2024-11-20T22:23:57,355 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T22:23:57,355 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 328fe2cc950802b391cb9dd7043a44b7:C
2024-11-20T22:23:57,378 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:57,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141497368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:57,380 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:57,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141497370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:57,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:57,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141497382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:57,398 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:57,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141497397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:57,468 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 328fe2cc950802b391cb9dd7043a44b7#A#compaction#109 average throughput is 0.04 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T22:23:57,471 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/8526e2281bf84975be38065040f37de0 is 175, key is test_row_0/A:col10/1732141435323/Put/seqid=0
2024-11-20T22:23:57,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741958_1134 (size=31058)
2024-11-20T22:23:57,519 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/8526e2281bf84975be38065040f37de0 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/8526e2281bf84975be38065040f37de0
2024-11-20T22:23:57,533 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 328fe2cc950802b391cb9dd7043a44b7/A of 328fe2cc950802b391cb9dd7043a44b7 into 8526e2281bf84975be38065040f37de0(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T22:23:57,534 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 328fe2cc950802b391cb9dd7043a44b7:
2024-11-20T22:23:57,534 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., storeName=328fe2cc950802b391cb9dd7043a44b7/A, priority=13, startTime=1732141436877; duration=0sec
2024-11-20T22:23:57,534 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T22:23:57,534 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 328fe2cc950802b391cb9dd7043a44b7:A
2024-11-20T22:23:57,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:57,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141497683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:57,726 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/f8a69baae4d74b0cbe2334e2af76ccfc
2024-11-20T22:23:57,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/f4dc4371d6b64b62b5813a27771e4597 is 50, key is test_row_0/B:col10/1732141436666/Put/seqid=0
2024-11-20T22:23:57,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741959_1135 (size=12001)
2024-11-20T22:23:57,898 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:57,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141497889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:57,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:57,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141497893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:57,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:57,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141497903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:58,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47
2024-11-20T22:23:58,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:58,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141498192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:58,206 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/f4dc4371d6b64b62b5813a27771e4597
2024-11-20T22:23:58,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/1a2ed107dc9c45e59f23a2023fa6d007 is 50, key is test_row_0/C:col10/1732141436666/Put/seqid=0
2024-11-20T22:23:58,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741960_1136 (size=12001)
2024-11-20T22:23:58,279 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/1a2ed107dc9c45e59f23a2023fa6d007
2024-11-20T22:23:58,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/f8a69baae4d74b0cbe2334e2af76ccfc as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/f8a69baae4d74b0cbe2334e2af76ccfc
2024-11-20T22:23:58,328 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/f8a69baae4d74b0cbe2334e2af76ccfc, entries=150, sequenceid=78, filesize=30.2 K
2024-11-20T22:23:58,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/f4dc4371d6b64b62b5813a27771e4597 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/f4dc4371d6b64b62b5813a27771e4597
2024-11-20T22:23:58,347 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/f4dc4371d6b64b62b5813a27771e4597, entries=150, sequenceid=78, filesize=11.7 K
2024-11-20T22:23:58,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/1a2ed107dc9c45e59f23a2023fa6d007 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/1a2ed107dc9c45e59f23a2023fa6d007
2024-11-20T22:23:58,373 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/1a2ed107dc9c45e59f23a2023fa6d007, entries=150, sequenceid=78, filesize=11.7 K
2024-11-20T22:23:58,375 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 328fe2cc950802b391cb9dd7043a44b7 in 1390ms, sequenceid=78, compaction requested=false
2024-11-20T22:23:58,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 328fe2cc950802b391cb9dd7043a44b7:
2024-11-20T22:23:58,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.
2024-11-20T22:23:58,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48
2024-11-20T22:23:58,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=48
2024-11-20T22:23:58,379 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47
2024-11-20T22:23:58,379 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 4.3530 sec
2024-11-20T22:23:58,383 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 4.3600 sec
2024-11-20T22:23:58,391 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 328fe2cc950802b391cb9dd7043a44b7 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB
2024-11-20T22:23:58,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=A
2024-11-20T22:23:58,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:23:58,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=B
2024-11-20T22:23:58,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:23:58,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=C
2024-11-20T22:23:58,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:23:58,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 328fe2cc950802b391cb9dd7043a44b7
2024-11-20T22:23:58,419 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200e727923923d499491c223102bf7a42c_328fe2cc950802b391cb9dd7043a44b7 is 50, key is test_row_0/A:col10/1732141437060/Put/seqid=0
2024-11-20T22:23:58,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741961_1137 (size=14594)
2024-11-20T22:23:58,474 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:23:58,482 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200e727923923d499491c223102bf7a42c_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200e727923923d499491c223102bf7a42c_328fe2cc950802b391cb9dd7043a44b7
2024-11-20T22:23:58,485 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/22030bc21d6643869411078cbc27ffa5, store: [table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7]
2024-11-20T22:23:58,486 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/22030bc21d6643869411078cbc27ffa5 is 175, key is test_row_0/A:col10/1732141437060/Put/seqid=0
2024-11-20T22:23:58,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741962_1138 (size=39549)
2024-11-20T22:23:58,529 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/22030bc21d6643869411078cbc27ffa5
2024-11-20T22:23:58,542 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/760f00717979453d9d55ac19fa092034 is 50, key is test_row_0/B:col10/1732141437060/Put/seqid=0
2024-11-20T22:23:58,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741963_1139 (size=12001)
2024-11-20T22:23:58,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:58,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141498656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:58,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:58,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141498764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:58,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:58,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141498900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:58,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:58,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141498910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:58,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:58,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141498910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:58,977 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:58,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141498970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:58,996 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/760f00717979453d9d55ac19fa092034
2024-11-20T22:23:59,038 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/243b9853092b4bc58c5648b9a5c8cec8 is 50, key is test_row_0/C:col10/1732141437060/Put/seqid=0
2024-11-20T22:23:59,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741964_1140 (size=12001)
2024-11-20T22:23:59,215 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:59,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141499211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:59,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:23:59,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141499282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:23:59,495 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/243b9853092b4bc58c5648b9a5c8cec8
2024-11-20T22:23:59,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/22030bc21d6643869411078cbc27ffa5 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/22030bc21d6643869411078cbc27ffa5
2024-11-20T22:23:59,577 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/22030bc21d6643869411078cbc27ffa5, entries=200, sequenceid=93, filesize=38.6 K
2024-11-20T22:23:59,587 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/760f00717979453d9d55ac19fa092034 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/760f00717979453d9d55ac19fa092034
2024-11-20T22:23:59,610 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/760f00717979453d9d55ac19fa092034, entries=150, sequenceid=93, filesize=11.7 K
2024-11-20T22:23:59,612 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees
2024-11-20T22:23:59,612 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer
2024-11-20T22:23:59,614 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/243b9853092b4bc58c5648b9a5c8cec8 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/243b9853092b4bc58c5648b9a5c8cec8
2024-11-20T22:23:59,639 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/243b9853092b4bc58c5648b9a5c8cec8, entries=150, sequenceid=93, filesize=11.7 K
2024-11-20T22:23:59,640 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 328fe2cc950802b391cb9dd7043a44b7 in 1249ms, sequenceid=93, compaction requested=true
2024-11-20T22:23:59,641 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 328fe2cc950802b391cb9dd7043a44b7:
2024-11-20T22:23:59,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 328fe2cc950802b391cb9dd7043a44b7:A, priority=-2147483648, current under compaction store size is 1
2024-11-20T22:23:59,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T22:23:59,641 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T22:23:59,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 328fe2cc950802b391cb9dd7043a44b7:B, priority=-2147483648, current under compaction store size is 2
2024-11-20T22:23:59,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-20T22:23:59,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 328fe2cc950802b391cb9dd7043a44b7:C, priority=-2147483648, current under compaction store size is 3
2024-11-20T22:23:59,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0
2024-11-20T22:23:59,641 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T22:23:59,646 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T22:23:59,646 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T22:23:59,646 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 328fe2cc950802b391cb9dd7043a44b7/A is initiating minor compaction (all files)
2024-11-20T22:23:59,646 DEBUG
[RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 328fe2cc950802b391cb9dd7043a44b7/B is initiating minor compaction (all files) 2024-11-20T22:23:59,646 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 328fe2cc950802b391cb9dd7043a44b7/A in TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:59,646 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 328fe2cc950802b391cb9dd7043a44b7/B in TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:59,646 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/ee4e2b5c702143578206564a404cded6, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/f4dc4371d6b64b62b5813a27771e4597, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/760f00717979453d9d55ac19fa092034] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp, totalSize=35.3 K 2024-11-20T22:23:59,646 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/8526e2281bf84975be38065040f37de0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/f8a69baae4d74b0cbe2334e2af76ccfc, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/22030bc21d6643869411078cbc27ffa5] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp, totalSize=99.2 K 2024-11-20T22:23:59,646 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:23:59,646 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/8526e2281bf84975be38065040f37de0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/f8a69baae4d74b0cbe2334e2af76ccfc, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/22030bc21d6643869411078cbc27ffa5] 2024-11-20T22:23:59,647 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee4e2b5c702143578206564a404cded6, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732141435323 2024-11-20T22:23:59,648 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 8526e2281bf84975be38065040f37de0, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732141435323 2024-11-20T22:23:59,648 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4dc4371d6b64b62b5813a27771e4597, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732141436666 2024-11-20T22:23:59,648 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 760f00717979453d9d55ac19fa092034, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732141437047 2024-11-20T22:23:59,648 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting f8a69baae4d74b0cbe2334e2af76ccfc, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732141436666 2024-11-20T22:23:59,649 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 22030bc21d6643869411078cbc27ffa5, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732141437047 2024-11-20T22:23:59,689 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 328fe2cc950802b391cb9dd7043a44b7#B#compaction#117 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:59,689 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/f4fc9f6dcfab4e2cb47e5f86b30ca181 is 50, key is test_row_0/B:col10/1732141437060/Put/seqid=0 2024-11-20T22:23:59,701 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:23:59,725 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112094ade4d3dcfc40c89e9c73c320574c2c_328fe2cc950802b391cb9dd7043a44b7 store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:23:59,729 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112094ade4d3dcfc40c89e9c73c320574c2c_328fe2cc950802b391cb9dd7043a44b7, store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:23:59,729 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112094ade4d3dcfc40c89e9c73c320574c2c_328fe2cc950802b391cb9dd7043a44b7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:23:59,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741965_1141 (size=12207) 2024-11-20T22:23:59,777 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/f4fc9f6dcfab4e2cb47e5f86b30ca181 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/f4fc9f6dcfab4e2cb47e5f86b30ca181 2024-11-20T22:23:59,792 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 328fe2cc950802b391cb9dd7043a44b7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:23:59,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=A 2024-11-20T22:23:59,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:59,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=B 2024-11-20T22:23:59,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:59,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=C 2024-11-20T22:23:59,793 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:59,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:23:59,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741966_1142 (size=4469) 2024-11-20T22:23:59,818 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 328fe2cc950802b391cb9dd7043a44b7#A#compaction#118 average throughput is 0.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:59,819 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/100c3d6e913c4c33ab6c851bc3ded418 is 175, key is test_row_0/A:col10/1732141437060/Put/seqid=0 2024-11-20T22:23:59,827 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 328fe2cc950802b391cb9dd7043a44b7/B of 328fe2cc950802b391cb9dd7043a44b7 into f4fc9f6dcfab4e2cb47e5f86b30ca181(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:23:59,827 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:23:59,827 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., storeName=328fe2cc950802b391cb9dd7043a44b7/B, priority=13, startTime=1732141439641; duration=0sec 2024-11-20T22:23:59,827 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:59,827 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 328fe2cc950802b391cb9dd7043a44b7:B 2024-11-20T22:23:59,827 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:23:59,831 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:23:59,831 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 328fe2cc950802b391cb9dd7043a44b7/C is initiating minor compaction (all files) 2024-11-20T22:23:59,831 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 328fe2cc950802b391cb9dd7043a44b7/C in TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
2024-11-20T22:23:59,832 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/4e82da334502494ba04f340e037c697a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/1a2ed107dc9c45e59f23a2023fa6d007, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/243b9853092b4bc58c5648b9a5c8cec8] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp, totalSize=35.3 K 2024-11-20T22:23:59,835 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e82da334502494ba04f340e037c697a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732141435323 2024-11-20T22:23:59,839 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a2ed107dc9c45e59f23a2023fa6d007, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732141436666 2024-11-20T22:23:59,843 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d6ea4bc6d86d4825811ff4e60293bd22_328fe2cc950802b391cb9dd7043a44b7 is 50, key is test_row_0/A:col10/1732141439790/Put/seqid=0 2024-11-20T22:23:59,847 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 243b9853092b4bc58c5648b9a5c8cec8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732141437047 2024-11-20T22:23:59,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741967_1143 (size=31161) 2024-11-20T22:23:59,903 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:59,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141499891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:23:59,914 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/100c3d6e913c4c33ab6c851bc3ded418 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/100c3d6e913c4c33ab6c851bc3ded418 2024-11-20T22:23:59,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741968_1144 (size=14594) 2024-11-20T22:23:59,925 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 328fe2cc950802b391cb9dd7043a44b7#C#compaction#120 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:59,926 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/9bbe160310f948eebbaab8e5bc5ca8b0 is 50, key is test_row_0/C:col10/1732141437060/Put/seqid=0 2024-11-20T22:23:59,928 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 328fe2cc950802b391cb9dd7043a44b7/A of 328fe2cc950802b391cb9dd7043a44b7 into 100c3d6e913c4c33ab6c851bc3ded418(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:23:59,928 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:23:59,928 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., storeName=328fe2cc950802b391cb9dd7043a44b7/A, priority=13, startTime=1732141439641; duration=0sec 2024-11-20T22:23:59,928 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:59,928 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 328fe2cc950802b391cb9dd7043a44b7:A 2024-11-20T22:23:59,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741969_1145 (size=12207) 2024-11-20T22:24:00,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:00,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141500005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:00,045 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/9bbe160310f948eebbaab8e5bc5ca8b0 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/9bbe160310f948eebbaab8e5bc5ca8b0 2024-11-20T22:24:00,087 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 328fe2cc950802b391cb9dd7043a44b7/C of 328fe2cc950802b391cb9dd7043a44b7 into 9bbe160310f948eebbaab8e5bc5ca8b0(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:00,087 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:00,087 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., storeName=328fe2cc950802b391cb9dd7043a44b7/C, priority=13, startTime=1732141439641; duration=0sec 2024-11-20T22:24:00,087 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:00,087 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 328fe2cc950802b391cb9dd7043a44b7:C 2024-11-20T22:24:00,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:00,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141500218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:00,322 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:00,327 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d6ea4bc6d86d4825811ff4e60293bd22_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d6ea4bc6d86d4825811ff4e60293bd22_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:00,329 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/fa95887b1c6f4e9b9e214f2b899389b0, store: [table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:00,330 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/fa95887b1c6f4e9b9e214f2b899389b0 is 175, key is test_row_0/A:col10/1732141439790/Put/seqid=0 2024-11-20T22:24:00,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741970_1146 (size=39549) 2024-11-20T22:24:00,417 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/fa95887b1c6f4e9b9e214f2b899389b0 2024-11-20T22:24:00,475 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/26eb7e13e5494c27a96ce8ecea9e6bac is 50, key is test_row_0/B:col10/1732141439790/Put/seqid=0 2024-11-20T22:24:00,529 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:00,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141500527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:00,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741971_1147 (size=12001) 2024-11-20T22:24:00,535 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/26eb7e13e5494c27a96ce8ecea9e6bac 2024-11-20T22:24:00,557 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/cb6694a05da1479bb0028fb9916f5620 is 50, key is test_row_0/C:col10/1732141439790/Put/seqid=0 2024-11-20T22:24:00,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741972_1148 (size=12001) 2024-11-20T22:24:00,929 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:00,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141500926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:00,932 DEBUG [Thread-586 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4255 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., hostname=6365a1e51efd,44631,1732141399950, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:00,933 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:00,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141500926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:00,934 DEBUG [Thread-594 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4269 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., hostname=6365a1e51efd,44631,1732141399950, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:00,936 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:00,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141500932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:00,939 DEBUG [Thread-588 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4263 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., hostname=6365a1e51efd,44631,1732141399950, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:01,004 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/cb6694a05da1479bb0028fb9916f5620 2024-11-20T22:24:01,024 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/fa95887b1c6f4e9b9e214f2b899389b0 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/fa95887b1c6f4e9b9e214f2b899389b0 2024-11-20T22:24:01,031 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/fa95887b1c6f4e9b9e214f2b899389b0, entries=200, sequenceid=118, filesize=38.6 K 2024-11-20T22:24:01,033 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/26eb7e13e5494c27a96ce8ecea9e6bac as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/26eb7e13e5494c27a96ce8ecea9e6bac 2024-11-20T22:24:01,041 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:01,042 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/26eb7e13e5494c27a96ce8ecea9e6bac, entries=150, sequenceid=118, filesize=11.7 K 2024-11-20T22:24:01,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141501034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:01,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/cb6694a05da1479bb0028fb9916f5620 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/cb6694a05da1479bb0028fb9916f5620 2024-11-20T22:24:01,069 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/cb6694a05da1479bb0028fb9916f5620, entries=150, sequenceid=118, filesize=11.7 K 2024-11-20T22:24:01,075 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 328fe2cc950802b391cb9dd7043a44b7 in 1282ms, sequenceid=118, compaction requested=false 2024-11-20T22:24:01,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:01,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:01,243 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 328fe2cc950802b391cb9dd7043a44b7 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T22:24:01,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=A 2024-11-20T22:24:01,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:01,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=B 2024-11-20T22:24:01,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:01,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO 
DISK 328fe2cc950802b391cb9dd7043a44b7, store=C 2024-11-20T22:24:01,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:01,273 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201187f22096234d8881f565232230630c_328fe2cc950802b391cb9dd7043a44b7 is 50, key is test_row_0/A:col10/1732141439870/Put/seqid=0 2024-11-20T22:24:01,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741973_1149 (size=14694) 2024-11-20T22:24:01,321 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:01,327 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201187f22096234d8881f565232230630c_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201187f22096234d8881f565232230630c_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:01,328 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/1091d292d56740478d0de0d81aebeeff, store: [table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:01,329 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/1091d292d56740478d0de0d81aebeeff is 175, key is test_row_0/A:col10/1732141439870/Put/seqid=0 2024-11-20T22:24:01,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741974_1150 (size=39649) 2024-11-20T22:24:01,367 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=133, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/1091d292d56740478d0de0d81aebeeff 2024-11-20T22:24:01,381 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/77d572865dc1447285b4f775d77e9275 is 50, key is test_row_0/B:col10/1732141439870/Put/seqid=0 2024-11-20T22:24:01,400 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:01,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141501391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:01,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741975_1151 (size=12051) 2024-11-20T22:24:01,422 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/77d572865dc1447285b4f775d77e9275 2024-11-20T22:24:01,446 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/408169e10bd84091bec341b6918bed48 is 50, key is test_row_0/C:col10/1732141439870/Put/seqid=0 2024-11-20T22:24:01,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741976_1152 (size=12051) 2024-11-20T22:24:01,500 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/408169e10bd84091bec341b6918bed48 2024-11-20T22:24:01,507 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:01,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141501503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:01,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/1091d292d56740478d0de0d81aebeeff as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/1091d292d56740478d0de0d81aebeeff 2024-11-20T22:24:01,529 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/1091d292d56740478d0de0d81aebeeff, entries=200, sequenceid=133, filesize=38.7 K 2024-11-20T22:24:01,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/77d572865dc1447285b4f775d77e9275 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/77d572865dc1447285b4f775d77e9275 2024-11-20T22:24:01,554 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/77d572865dc1447285b4f775d77e9275, entries=150, sequenceid=133, filesize=11.8 K 2024-11-20T22:24:01,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/408169e10bd84091bec341b6918bed48 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/408169e10bd84091bec341b6918bed48 2024-11-20T22:24:01,571 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/408169e10bd84091bec341b6918bed48, entries=150, sequenceid=133, filesize=11.8 K 2024-11-20T22:24:01,572 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 328fe2cc950802b391cb9dd7043a44b7 in 329ms, sequenceid=133, compaction requested=true 2024-11-20T22:24:01,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:01,573 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:01,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 328fe2cc950802b391cb9dd7043a44b7:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:01,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:01,574 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:01,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 328fe2cc950802b391cb9dd7043a44b7:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:01,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:01,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 328fe2cc950802b391cb9dd7043a44b7:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:01,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:01,578 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:01,578 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 328fe2cc950802b391cb9dd7043a44b7/A is initiating minor compaction (all files) 2024-11-20T22:24:01,578 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 328fe2cc950802b391cb9dd7043a44b7/A in TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
2024-11-20T22:24:01,578 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:01,578 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 328fe2cc950802b391cb9dd7043a44b7/B is initiating minor compaction (all files) 2024-11-20T22:24:01,578 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/100c3d6e913c4c33ab6c851bc3ded418, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/fa95887b1c6f4e9b9e214f2b899389b0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/1091d292d56740478d0de0d81aebeeff] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp, totalSize=107.8 K 2024-11-20T22:24:01,578 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 328fe2cc950802b391cb9dd7043a44b7/B in TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:01,578 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:01,578 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/f4fc9f6dcfab4e2cb47e5f86b30ca181, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/26eb7e13e5494c27a96ce8ecea9e6bac, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/77d572865dc1447285b4f775d77e9275] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp, totalSize=35.4 K 2024-11-20T22:24:01,578 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/100c3d6e913c4c33ab6c851bc3ded418, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/fa95887b1c6f4e9b9e214f2b899389b0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/1091d292d56740478d0de0d81aebeeff] 2024-11-20T22:24:01,579 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting f4fc9f6dcfab4e2cb47e5f86b30ca181, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732141437047 2024-11-20T22:24:01,580 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 100c3d6e913c4c33ab6c851bc3ded418, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732141437047 2024-11-20T22:24:01,581 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 26eb7e13e5494c27a96ce8ecea9e6bac, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732141438632 2024-11-20T22:24:01,581 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa95887b1c6f4e9b9e214f2b899389b0, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732141438604 2024-11-20T22:24:01,582 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 77d572865dc1447285b4f775d77e9275, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732141439819 2024-11-20T22:24:01,582 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1091d292d56740478d0de0d81aebeeff, keycount=200, bloomtype=ROW, size=38.7 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732141439819 2024-11-20T22:24:01,611 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 328fe2cc950802b391cb9dd7043a44b7#B#compaction#126 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:01,612 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/43c8ddc18d934846863c583c65d492c7 is 50, key is test_row_0/B:col10/1732141439870/Put/seqid=0 2024-11-20T22:24:01,624 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:01,651 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411203726310244404911948f9747c12cd13d_328fe2cc950802b391cb9dd7043a44b7 store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:01,670 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411203726310244404911948f9747c12cd13d_328fe2cc950802b391cb9dd7043a44b7, store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:01,670 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203726310244404911948f9747c12cd13d_328fe2cc950802b391cb9dd7043a44b7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:01,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741977_1153 (size=12359) 2024-11-20T22:24:01,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:01,725 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 328fe2cc950802b391cb9dd7043a44b7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:24:01,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=A 2024-11-20T22:24:01,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:01,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=B 2024-11-20T22:24:01,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:01,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=C 2024-11-20T22:24:01,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:01,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741978_1154 (size=4469) 2024-11-20T22:24:01,730 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 328fe2cc950802b391cb9dd7043a44b7#A#compaction#127 average throughput is 0.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:01,730 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/d4aa23f7841345a1afbb021ef5f0b0f4 is 175, key is test_row_0/A:col10/1732141439870/Put/seqid=0 2024-11-20T22:24:01,757 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201f8bc1bc35c448b98f0c5b69436cf060_328fe2cc950802b391cb9dd7043a44b7 is 50, key is test_row_0/A:col10/1732141441720/Put/seqid=0 2024-11-20T22:24:01,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741979_1155 (size=31313) 2024-11-20T22:24:01,802 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/d4aa23f7841345a1afbb021ef5f0b0f4 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d4aa23f7841345a1afbb021ef5f0b0f4 2024-11-20T22:24:01,812 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 328fe2cc950802b391cb9dd7043a44b7/A of 328fe2cc950802b391cb9dd7043a44b7 into d4aa23f7841345a1afbb021ef5f0b0f4(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:01,813 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:01,813 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., storeName=328fe2cc950802b391cb9dd7043a44b7/A, priority=13, startTime=1732141441573; duration=0sec 2024-11-20T22:24:01,813 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:01,813 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 328fe2cc950802b391cb9dd7043a44b7:A 2024-11-20T22:24:01,813 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:01,814 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:01,815 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 328fe2cc950802b391cb9dd7043a44b7/C is initiating minor compaction (all files) 2024-11-20T22:24:01,815 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 328fe2cc950802b391cb9dd7043a44b7/C in TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:01,815 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/9bbe160310f948eebbaab8e5bc5ca8b0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/cb6694a05da1479bb0028fb9916f5620, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/408169e10bd84091bec341b6918bed48] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp, totalSize=35.4 K 2024-11-20T22:24:01,816 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9bbe160310f948eebbaab8e5bc5ca8b0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732141437047 2024-11-20T22:24:01,816 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb6694a05da1479bb0028fb9916f5620, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732141438632 2024-11-20T22:24:01,816 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 408169e10bd84091bec341b6918bed48, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732141439819 2024-11-20T22:24:01,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34119 is added to blk_1073741980_1156 (size=14794) 2024-11-20T22:24:01,834 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:01,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:01,840 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201f8bc1bc35c448b98f0c5b69436cf060_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201f8bc1bc35c448b98f0c5b69436cf060_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:01,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141501836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:01,842 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/272d1c466f254691b11c9869cfcaa6b1, store: [table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:01,843 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/272d1c466f254691b11c9869cfcaa6b1 is 175, key is test_row_0/A:col10/1732141441720/Put/seqid=0 2024-11-20T22:24:01,855 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 
{}] throttle.PressureAwareThroughputController(145): 328fe2cc950802b391cb9dd7043a44b7#C#compaction#129 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:01,858 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/c9f9d175ad0d4924b49d030245e8c352 is 50, key is test_row_0/C:col10/1732141439870/Put/seqid=0 2024-11-20T22:24:01,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741982_1158 (size=12359) 2024-11-20T22:24:01,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741981_1157 (size=39749) 2024-11-20T22:24:01,938 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=158, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/272d1c466f254691b11c9869cfcaa6b1 2024-11-20T22:24:01,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:01,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141501955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:01,959 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/716586d81bd541429cb72479682bf453 is 50, key is test_row_0/B:col10/1732141441720/Put/seqid=0 2024-11-20T22:24:01,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741983_1159 (size=12151) 2024-11-20T22:24:02,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:02,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141502044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:02,083 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/43c8ddc18d934846863c583c65d492c7 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/43c8ddc18d934846863c583c65d492c7 2024-11-20T22:24:02,104 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 328fe2cc950802b391cb9dd7043a44b7/B of 328fe2cc950802b391cb9dd7043a44b7 into 43c8ddc18d934846863c583c65d492c7(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:02,104 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:02,104 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., storeName=328fe2cc950802b391cb9dd7043a44b7/B, priority=13, startTime=1732141441574; duration=0sec 2024-11-20T22:24:02,104 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:02,104 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 328fe2cc950802b391cb9dd7043a44b7:B 2024-11-20T22:24:02,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T22:24:02,149 INFO [Thread-596 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-11-20T22:24:02,154 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:02,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-20T22:24:02,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T22:24:02,156 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:02,157 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:02,157 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:02,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:02,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141502160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:02,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T22:24:02,309 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:02,310 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T22:24:02,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:02,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:02,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:02,310 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:02,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:02,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:02,343 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/c9f9d175ad0d4924b49d030245e8c352 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/c9f9d175ad0d4924b49d030245e8c352 2024-11-20T22:24:02,357 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 328fe2cc950802b391cb9dd7043a44b7/C of 328fe2cc950802b391cb9dd7043a44b7 into c9f9d175ad0d4924b49d030245e8c352(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:02,357 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:02,357 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., storeName=328fe2cc950802b391cb9dd7043a44b7/C, priority=13, startTime=1732141441575; duration=0sec 2024-11-20T22:24:02,357 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:02,357 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 328fe2cc950802b391cb9dd7043a44b7:C 2024-11-20T22:24:02,395 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/716586d81bd541429cb72479682bf453 2024-11-20T22:24:02,432 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/7b1e1a9416174f10bf2919aaeeaf9643 is 50, key is test_row_0/C:col10/1732141441720/Put/seqid=0 2024-11-20T22:24:02,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T22:24:02,465 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:02,466 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T22:24:02,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
2024-11-20T22:24:02,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:02,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:02,466 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:02,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:02,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:02,476 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:24:02,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141502468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:24:02,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741984_1160 (size=12151)
2024-11-20T22:24:02,483 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/7b1e1a9416174f10bf2919aaeeaf9643
2024-11-20T22:24:02,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/272d1c466f254691b11c9869cfcaa6b1 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/272d1c466f254691b11c9869cfcaa6b1
2024-11-20T22:24:02,516 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/272d1c466f254691b11c9869cfcaa6b1, entries=200, sequenceid=158, filesize=38.8 K
2024-11-20T22:24:02,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/716586d81bd541429cb72479682bf453 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/716586d81bd541429cb72479682bf453
2024-11-20T22:24:02,547 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/716586d81bd541429cb72479682bf453, entries=150, sequenceid=158, filesize=11.9 K
2024-11-20T22:24:02,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/7b1e1a9416174f10bf2919aaeeaf9643 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/7b1e1a9416174f10bf2919aaeeaf9643
2024-11-20T22:24:02,555 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/7b1e1a9416174f10bf2919aaeeaf9643, entries=150, sequenceid=158, filesize=11.9 K
2024-11-20T22:24:02,557 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 328fe2cc950802b391cb9dd7043a44b7 in 831ms, sequenceid=158, compaction requested=false
2024-11-20T22:24:02,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 328fe2cc950802b391cb9dd7043a44b7:
2024-11-20T22:24:02,618 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950
2024-11-20T22:24:02,621 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50
2024-11-20T22:24:02,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.
2024-11-20T22:24:02,622 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 328fe2cc950802b391cb9dd7043a44b7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-20T22:24:02,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=A
2024-11-20T22:24:02,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:24:02,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=B
2024-11-20T22:24:02,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:24:02,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=C
2024-11-20T22:24:02,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:24:02,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120be89ff415acc4783a27eb651f5185afb_328fe2cc950802b391cb9dd7043a44b7 is 50, key is test_row_0/A:col10/1732141441793/Put/seqid=0
2024-11-20T22:24:02,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741985_1161 (size=12304)
2024-11-20T22:24:02,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:24:02,716 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120be89ff415acc4783a27eb651f5185afb_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120be89ff415acc4783a27eb651f5185afb_328fe2cc950802b391cb9dd7043a44b7
2024-11-20T22:24:02,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/215095e572ae429a9e5e88e82b077d4d, store: [table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7]
2024-11-20T22:24:02,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/215095e572ae429a9e5e88e82b077d4d is 175, key is test_row_0/A:col10/1732141441793/Put/seqid=0
2024-11-20T22:24:02,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49
2024-11-20T22:24:02,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741986_1162 (size=31105)
2024-11-20T22:24:02,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing
2024-11-20T22:24:02,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 328fe2cc950802b391cb9dd7043a44b7
2024-11-20T22:24:03,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:03,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141503134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:03,169 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=172, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/215095e572ae429a9e5e88e82b077d4d 2024-11-20T22:24:03,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/2c67d8ce7e8948e4995517da8f4c7959 is 50, key is test_row_0/B:col10/1732141441793/Put/seqid=0 2024-11-20T22:24:03,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741987_1163 (size=12151) 2024-11-20T22:24:03,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:03,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141503245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:03,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T22:24:03,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:24:03,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141503464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:24:03,650 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/2c67d8ce7e8948e4995517da8f4c7959
2024-11-20T22:24:03,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/74caf10f13d944d7b1aa698c473d3d41 is 50, key is test_row_0/C:col10/1732141441793/Put/seqid=0
2024-11-20T22:24:03,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741988_1164 (size=12151)
2024-11-20T22:24:03,730 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/74caf10f13d944d7b1aa698c473d3d41
2024-11-20T22:24:03,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/215095e572ae429a9e5e88e82b077d4d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/215095e572ae429a9e5e88e82b077d4d
2024-11-20T22:24:03,753 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/215095e572ae429a9e5e88e82b077d4d, entries=150, sequenceid=172, filesize=30.4 K
2024-11-20T22:24:03,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/2c67d8ce7e8948e4995517da8f4c7959 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/2c67d8ce7e8948e4995517da8f4c7959
2024-11-20T22:24:03,765 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/2c67d8ce7e8948e4995517da8f4c7959, entries=150, sequenceid=172, filesize=11.9 K
2024-11-20T22:24:03,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/74caf10f13d944d7b1aa698c473d3d41 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/74caf10f13d944d7b1aa698c473d3d41
2024-11-20T22:24:03,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:03,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141503773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:03,782 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/74caf10f13d944d7b1aa698c473d3d41, entries=150, sequenceid=172, filesize=11.9 K 2024-11-20T22:24:03,783 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 328fe2cc950802b391cb9dd7043a44b7 in 1162ms, sequenceid=172, compaction requested=true 2024-11-20T22:24:03,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:03,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
2024-11-20T22:24:03,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50
2024-11-20T22:24:03,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=50
2024-11-20T22:24:03,788 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49
2024-11-20T22:24:03,788 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6280 sec
2024-11-20T22:24:03,792 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 1.6360 sec
2024-11-20T22:24:04,072 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 328fe2cc950802b391cb9dd7043a44b7 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB
2024-11-20T22:24:04,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=A
2024-11-20T22:24:04,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:24:04,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=B
2024-11-20T22:24:04,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:24:04,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=C
2024-11-20T22:24:04,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:24:04,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 328fe2cc950802b391cb9dd7043a44b7
2024-11-20T22:24:04,117 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202638d8f65daf489c96f6f2a477b7d5bc_328fe2cc950802b391cb9dd7043a44b7 is 50, key is test_row_0/A:col10/1732141444064/Put/seqid=0
2024-11-20T22:24:04,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741989_1165 (size=14794)
2024-11-20T22:24:04,155 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:24:04,211 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202638d8f65daf489c96f6f2a477b7d5bc_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202638d8f65daf489c96f6f2a477b7d5bc_328fe2cc950802b391cb9dd7043a44b7
2024-11-20T22:24:04,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:24:04,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141504210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:24:04,219 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/27c30ae5df0543c8b6b1d4dcdadf971d, store: [table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7]
2024-11-20T22:24:04,220 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/27c30ae5df0543c8b6b1d4dcdadf971d is 175, key is test_row_0/A:col10/1732141444064/Put/seqid=0
2024-11-20T22:24:04,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49
2024-11-20T22:24:04,263 INFO [Thread-596 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed
2024-11-20T22:24:04,265 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-20T22:24:04,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees
2024-11-20T22:24:04,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51
2024-11-20T22:24:04,268 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:04,271 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:04,271 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:04,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:04,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141504278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:04,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741990_1166 (size=39749) 2024-11-20T22:24:04,298 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=198, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/27c30ae5df0543c8b6b1d4dcdadf971d 2024-11-20T22:24:04,326 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/a227d2a0b1404b7f92562c06266b8665 is 50, key is test_row_0/B:col10/1732141444064/Put/seqid=0 
2024-11-20T22:24:04,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:04,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141504320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:04,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T22:24:04,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741991_1167 (size=12151) 2024-11-20T22:24:04,425 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:04,426 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T22:24:04,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:04,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:04,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
2024-11-20T22:24:04,427 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:04,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:04,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:04,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:04,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141504542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:04,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T22:24:04,590 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:04,598 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T22:24:04,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:04,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:04,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:04,598 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:04,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:04,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:04,750 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:04,765 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T22:24:04,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:04,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:04,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:04,767 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:04,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:04,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:04,804 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/a227d2a0b1404b7f92562c06266b8665 2024-11-20T22:24:04,825 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/631de4c1d12844a79a95a14813afee23 is 50, key is test_row_0/C:col10/1732141444064/Put/seqid=0 2024-11-20T22:24:04,844 INFO [master/6365a1e51efd:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-20T22:24:04,844 INFO [master/6365a1e51efd:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-20T22:24:04,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:04,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141504855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:04,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741992_1168 (size=12151) 2024-11-20T22:24:04,868 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/631de4c1d12844a79a95a14813afee23 2024-11-20T22:24:04,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T22:24:04,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/27c30ae5df0543c8b6b1d4dcdadf971d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/27c30ae5df0543c8b6b1d4dcdadf971d 2024-11-20T22:24:04,906 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/27c30ae5df0543c8b6b1d4dcdadf971d, entries=200, sequenceid=198, filesize=38.8 K 2024-11-20T22:24:04,908 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/a227d2a0b1404b7f92562c06266b8665 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/a227d2a0b1404b7f92562c06266b8665 2024-11-20T22:24:04,915 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/a227d2a0b1404b7f92562c06266b8665, entries=150, sequenceid=198, filesize=11.9 K 2024-11-20T22:24:04,919 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:04,920 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): 
Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T22:24:04,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:04,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:04,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:04,920 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:04,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:04,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:04,923 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/631de4c1d12844a79a95a14813afee23 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/631de4c1d12844a79a95a14813afee23 2024-11-20T22:24:04,931 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/631de4c1d12844a79a95a14813afee23, entries=150, sequenceid=198, filesize=11.9 K 2024-11-20T22:24:04,932 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 328fe2cc950802b391cb9dd7043a44b7 in 860ms, sequenceid=198, compaction requested=true 2024-11-20T22:24:04,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:04,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 328fe2cc950802b391cb9dd7043a44b7:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:04,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:04,932 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:04,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 328fe2cc950802b391cb9dd7043a44b7:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:04,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:04,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 328fe2cc950802b391cb9dd7043a44b7:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:04,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:24:04,933 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:04,934 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141916 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:04,934 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 328fe2cc950802b391cb9dd7043a44b7/A is initiating minor compaction (all files) 2024-11-20T22:24:04,935 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 328fe2cc950802b391cb9dd7043a44b7/A in 
TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:04,935 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d4aa23f7841345a1afbb021ef5f0b0f4, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/272d1c466f254691b11c9869cfcaa6b1, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/215095e572ae429a9e5e88e82b077d4d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/27c30ae5df0543c8b6b1d4dcdadf971d] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp, totalSize=138.6 K 2024-11-20T22:24:04,935 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:04,935 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d4aa23f7841345a1afbb021ef5f0b0f4, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/272d1c466f254691b11c9869cfcaa6b1, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/215095e572ae429a9e5e88e82b077d4d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/27c30ae5df0543c8b6b1d4dcdadf971d] 2024-11-20T22:24:04,935 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting d4aa23f7841345a1afbb021ef5f0b0f4, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732141439819 2024-11-20T22:24:04,936 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 272d1c466f254691b11c9869cfcaa6b1, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732141441363 2024-11-20T22:24:04,937 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 215095e572ae429a9e5e88e82b077d4d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732141441793 2024-11-20T22:24:04,937 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27c30ae5df0543c8b6b1d4dcdadf971d, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732141443101 
2024-11-20T22:24:04,937 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:04,937 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 328fe2cc950802b391cb9dd7043a44b7/B is initiating minor compaction (all files) 2024-11-20T22:24:04,937 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 328fe2cc950802b391cb9dd7043a44b7/B in TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:04,938 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/43c8ddc18d934846863c583c65d492c7, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/716586d81bd541429cb72479682bf453, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/2c67d8ce7e8948e4995517da8f4c7959, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/a227d2a0b1404b7f92562c06266b8665] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp, totalSize=47.7 K 2024-11-20T22:24:04,939 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 43c8ddc18d934846863c583c65d492c7, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732141439819 2024-11-20T22:24:04,940 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 716586d81bd541429cb72479682bf453, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732141441363 2024-11-20T22:24:04,940 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c67d8ce7e8948e4995517da8f4c7959, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732141441793 2024-11-20T22:24:04,940 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting a227d2a0b1404b7f92562c06266b8665, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732141443101 2024-11-20T22:24:04,960 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:04,963 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 328fe2cc950802b391cb9dd7043a44b7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:24:04,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=A 2024-11-20T22:24:04,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-20T22:24:04,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=B 2024-11-20T22:24:04,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:04,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=C 2024-11-20T22:24:04,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:04,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:04,973 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 328fe2cc950802b391cb9dd7043a44b7#B#compaction#139 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:04,974 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/56f8fbb3ea824892b806d20eb82508f3 is 50, key is test_row_0/B:col10/1732141444064/Put/seqid=0 2024-11-20T22:24:05,000 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112039af40bbd2074129a52fd3848751ea93_328fe2cc950802b391cb9dd7043a44b7 store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:05,003 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112039af40bbd2074129a52fd3848751ea93_328fe2cc950802b391cb9dd7043a44b7, store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:05,003 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112039af40bbd2074129a52fd3848751ea93_328fe2cc950802b391cb9dd7043a44b7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:05,017 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203e9654e6e17a4e41bbe494f68bae88ab_328fe2cc950802b391cb9dd7043a44b7 is 50, key is test_row_0/A:col10/1732141444957/Put/seqid=0 2024-11-20T22:24:05,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741993_1169 (size=12595) 2024-11-20T22:24:05,073 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:05,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T22:24:05,074 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:05,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:05,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:05,074 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:05,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:05,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:05,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141505070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:05,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141505071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:05,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141505079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:05,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741994_1170 (size=4469) 2024-11-20T22:24:05,103 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 328fe2cc950802b391cb9dd7043a44b7#A#compaction#138 average throughput is 0.17 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:05,104 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/6a28d226e77c4c1498f2fe6d1e5372f7 is 175, key is test_row_0/A:col10/1732141444064/Put/seqid=0 2024-11-20T22:24:05,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741995_1171 (size=14794) 2024-11-20T22:24:05,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741996_1172 (size=31549) 2024-11-20T22:24:05,185 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/6a28d226e77c4c1498f2fe6d1e5372f7 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/6a28d226e77c4c1498f2fe6d1e5372f7 2024-11-20T22:24:05,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141505186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:05,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141505186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:05,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141505189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:05,208 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 328fe2cc950802b391cb9dd7043a44b7/A of 328fe2cc950802b391cb9dd7043a44b7 into 6a28d226e77c4c1498f2fe6d1e5372f7(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:05,208 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:05,208 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., storeName=328fe2cc950802b391cb9dd7043a44b7/A, priority=12, startTime=1732141444932; duration=0sec 2024-11-20T22:24:05,209 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:05,209 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 328fe2cc950802b391cb9dd7043a44b7:A 2024-11-20T22:24:05,209 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:05,211 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:05,212 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 328fe2cc950802b391cb9dd7043a44b7/C is initiating minor compaction (all files) 2024-11-20T22:24:05,212 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 328fe2cc950802b391cb9dd7043a44b7/C in TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:05,212 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/c9f9d175ad0d4924b49d030245e8c352, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/7b1e1a9416174f10bf2919aaeeaf9643, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/74caf10f13d944d7b1aa698c473d3d41, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/631de4c1d12844a79a95a14813afee23] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp, totalSize=47.7 K 2024-11-20T22:24:05,212 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting c9f9d175ad0d4924b49d030245e8c352, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732141439819 2024-11-20T22:24:05,213 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b1e1a9416174f10bf2919aaeeaf9643, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732141441363 2024-11-20T22:24:05,213 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 74caf10f13d944d7b1aa698c473d3d41, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732141441793 2024-11-20T22:24:05,214 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 631de4c1d12844a79a95a14813afee23, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732141443101 2024-11-20T22:24:05,226 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:05,226 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T22:24:05,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:05,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:05,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:05,227 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:05,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:05,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:05,242 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 328fe2cc950802b391cb9dd7043a44b7#C#compaction#141 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:05,243 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/87126edc615d4d70ad0cd4c400931696 is 50, key is test_row_0/C:col10/1732141444064/Put/seqid=0 2024-11-20T22:24:05,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141505295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:05,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741997_1173 (size=12595) 2024-11-20T22:24:05,313 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/87126edc615d4d70ad0cd4c400931696 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/87126edc615d4d70ad0cd4c400931696 2024-11-20T22:24:05,326 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 328fe2cc950802b391cb9dd7043a44b7/C of 328fe2cc950802b391cb9dd7043a44b7 into 87126edc615d4d70ad0cd4c400931696(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:05,326 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:05,326 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., storeName=328fe2cc950802b391cb9dd7043a44b7/C, priority=12, startTime=1732141444932; duration=0sec 2024-11-20T22:24:05,326 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:05,326 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 328fe2cc950802b391cb9dd7043a44b7:C 2024-11-20T22:24:05,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141505362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:05,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T22:24:05,382 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:05,384 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T22:24:05,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:05,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:05,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:05,385 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:05,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:05,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:05,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141505392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:05,404 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141505392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:05,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141505395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:05,485 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/56f8fbb3ea824892b806d20eb82508f3 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/56f8fbb3ea824892b806d20eb82508f3 2024-11-20T22:24:05,499 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 328fe2cc950802b391cb9dd7043a44b7/B of 328fe2cc950802b391cb9dd7043a44b7 into 56f8fbb3ea824892b806d20eb82508f3(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:05,499 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:05,499 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., storeName=328fe2cc950802b391cb9dd7043a44b7/B, priority=12, startTime=1732141444932; duration=0sec 2024-11-20T22:24:05,499 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:05,499 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 328fe2cc950802b391cb9dd7043a44b7:B 2024-11-20T22:24:05,527 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:05,539 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203e9654e6e17a4e41bbe494f68bae88ab_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203e9654e6e17a4e41bbe494f68bae88ab_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:05,543 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:05,547 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T22:24:05,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:05,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:05,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:05,547 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:05,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:05,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:05,554 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/eb856bc4ce7449bbaaa4a09e1647ec9f, store: [table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:05,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/eb856bc4ce7449bbaaa4a09e1647ec9f is 175, key is test_row_0/A:col10/1732141444957/Put/seqid=0 2024-11-20T22:24:05,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741998_1174 (size=39749) 2024-11-20T22:24:05,601 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=209, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/eb856bc4ce7449bbaaa4a09e1647ec9f 2024-11-20T22:24:05,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/022be98f04844cd892e0718ac553831a is 50, key is test_row_0/B:col10/1732141444957/Put/seqid=0 2024-11-20T22:24:05,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741999_1175 (size=12151) 2024-11-20T22:24:05,699 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
6365a1e51efd,44631,1732141399950 2024-11-20T22:24:05,701 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T22:24:05,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:05,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:05,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:05,702 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:05,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:05,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:05,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141505711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:05,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141505711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:05,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141505717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:05,855 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:05,859 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T22:24:05,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:05,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:05,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:05,859 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:05,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:05,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:06,013 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:06,013 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T22:24:06,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:06,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:06,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:06,014 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:06,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:06,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:06,100 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/022be98f04844cd892e0718ac553831a 2024-11-20T22:24:06,127 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/83e06f7901a44a2fae91d7ca83a21f0e is 50, key is test_row_0/C:col10/1732141444957/Put/seqid=0 2024-11-20T22:24:06,169 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:06,169 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T22:24:06,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:06,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:06,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:06,170 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:06,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:06,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:06,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742000_1176 (size=12151) 2024-11-20T22:24:06,188 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/83e06f7901a44a2fae91d7ca83a21f0e 2024-11-20T22:24:06,204 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/eb856bc4ce7449bbaaa4a09e1647ec9f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/eb856bc4ce7449bbaaa4a09e1647ec9f 2024-11-20T22:24:06,211 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/eb856bc4ce7449bbaaa4a09e1647ec9f, entries=200, sequenceid=209, filesize=38.8 K 2024-11-20T22:24:06,213 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/022be98f04844cd892e0718ac553831a as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/022be98f04844cd892e0718ac553831a 2024-11-20T22:24:06,223 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/022be98f04844cd892e0718ac553831a, entries=150, sequenceid=209, filesize=11.9 K 2024-11-20T22:24:06,224 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/83e06f7901a44a2fae91d7ca83a21f0e as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/83e06f7901a44a2fae91d7ca83a21f0e 2024-11-20T22:24:06,233 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:06,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141506222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:06,234 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:06,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141506231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:06,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:06,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141506231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:06,237 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/83e06f7901a44a2fae91d7ca83a21f0e, entries=150, sequenceid=209, filesize=11.9 K 2024-11-20T22:24:06,238 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 328fe2cc950802b391cb9dd7043a44b7 in 1275ms, sequenceid=209, compaction requested=false 2024-11-20T22:24:06,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:06,325 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:06,327 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T22:24:06,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
2024-11-20T22:24:06,327 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 328fe2cc950802b391cb9dd7043a44b7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:24:06,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=A 2024-11-20T22:24:06,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:06,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=B 2024-11-20T22:24:06,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:06,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=C 2024-11-20T22:24:06,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:06,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T22:24:06,383 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:06,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112056c40b9b687249098a404d1997127fa9_328fe2cc950802b391cb9dd7043a44b7 is 50, key is test_row_0/A:col10/1732141445008/Put/seqid=0 2024-11-20T22:24:06,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:06,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742001_1177 (size=12304) 2024-11-20T22:24:06,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:06,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:06,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141506442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:06,466 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112056c40b9b687249098a404d1997127fa9_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112056c40b9b687249098a404d1997127fa9_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:06,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/9705a940649d422a90c54b1acbe92f69, store: [table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:06,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/9705a940649d422a90c54b1acbe92f69 is 175, key is test_row_0/A:col10/1732141445008/Put/seqid=0 2024-11-20T22:24:06,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742002_1178 (size=31105) 2024-11-20T22:24:06,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:06,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141506554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:06,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:06,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141506762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:06,930 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=237, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/9705a940649d422a90c54b1acbe92f69 2024-11-20T22:24:06,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/49b8d6e94d8544cab1d06b2ea729dc61 is 50, key is test_row_0/B:col10/1732141445008/Put/seqid=0 2024-11-20T22:24:07,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742003_1179 (size=12151) 2024-11-20T22:24:07,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:07,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141507067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:07,249 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:07,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141507243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:07,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:07,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141507243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:07,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:07,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141507248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:07,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:07,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141507313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:07,318 DEBUG [Thread-592 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4184 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., hostname=6365a1e51efd,44631,1732141399950, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:07,422 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/49b8d6e94d8544cab1d06b2ea729dc61 2024-11-20T22:24:07,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/79977a90708f49dfb3bd70caa2dd48b6 is 50, key is test_row_0/C:col10/1732141445008/Put/seqid=0 2024-11-20T22:24:07,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742004_1180 (size=12151) 2024-11-20T22:24:07,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:07,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141507577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:07,874 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/79977a90708f49dfb3bd70caa2dd48b6 2024-11-20T22:24:07,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/9705a940649d422a90c54b1acbe92f69 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/9705a940649d422a90c54b1acbe92f69 2024-11-20T22:24:07,890 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/9705a940649d422a90c54b1acbe92f69, entries=150, sequenceid=237, filesize=30.4 K 2024-11-20T22:24:07,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/49b8d6e94d8544cab1d06b2ea729dc61 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/49b8d6e94d8544cab1d06b2ea729dc61 2024-11-20T22:24:07,898 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/49b8d6e94d8544cab1d06b2ea729dc61, entries=150, sequenceid=237, filesize=11.9 K 2024-11-20T22:24:07,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/79977a90708f49dfb3bd70caa2dd48b6 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/79977a90708f49dfb3bd70caa2dd48b6 2024-11-20T22:24:07,908 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/79977a90708f49dfb3bd70caa2dd48b6, entries=150, sequenceid=237, filesize=11.9 K 2024-11-20T22:24:07,912 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 328fe2cc950802b391cb9dd7043a44b7 in 1585ms, sequenceid=237, compaction requested=true 2024-11-20T22:24:07,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:07,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:07,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-20T22:24:07,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-20T22:24:07,923 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-20T22:24:07,923 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.6480 sec 2024-11-20T22:24:07,925 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 3.6590 sec 2024-11-20T22:24:08,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T22:24:08,375 INFO [Thread-596 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-20T22:24:08,378 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:08,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-20T22:24:08,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T22:24:08,382 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:08,383 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:08,383 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:08,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T22:24:08,536 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:08,536 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T22:24:08,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:08,537 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 328fe2cc950802b391cb9dd7043a44b7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:24:08,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=A 2024-11-20T22:24:08,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:08,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=B 2024-11-20T22:24:08,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:08,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=C 2024-11-20T22:24:08,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:08,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208c3b182cfd054848a2598735e5f8256b_328fe2cc950802b391cb9dd7043a44b7 is 50, key is test_row_0/A:col10/1732141446432/Put/seqid=0 2024-11-20T22:24:08,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742005_1181 
(size=12304) 2024-11-20T22:24:08,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:08,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:08,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T22:24:08,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:08,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141508772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:08,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:08,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141508883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:08,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T22:24:08,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:08,993 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208c3b182cfd054848a2598735e5f8256b_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208c3b182cfd054848a2598735e5f8256b_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:08,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/62e4fc90926941c6b7c78fa3b2e6f05b, store: [table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:08,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/62e4fc90926941c6b7c78fa3b2e6f05b is 175, key is test_row_0/A:col10/1732141446432/Put/seqid=0 2024-11-20T22:24:09,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742006_1182 (size=31105) 2024-11-20T22:24:09,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:09,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141509090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:09,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:09,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141509260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:09,264 DEBUG [Thread-588 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4194 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., hostname=6365a1e51efd,44631,1732141399950, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:09,270 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:09,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141509266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:09,271 DEBUG [Thread-594 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4192 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., hostname=6365a1e51efd,44631,1732141399950, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:09,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:09,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141509275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:09,276 DEBUG [Thread-586 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4206 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., hostname=6365a1e51efd,44631,1732141399950, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:09,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:09,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141509396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:09,435 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=248, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/62e4fc90926941c6b7c78fa3b2e6f05b 2024-11-20T22:24:09,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/a88e9b77b57f4fcf8530ac73c5269ad4 is 50, key is test_row_0/B:col10/1732141446432/Put/seqid=0 2024-11-20T22:24:09,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742007_1183 (size=12151) 2024-11-20T22:24:09,447 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/a88e9b77b57f4fcf8530ac73c5269ad4 2024-11-20T22:24:09,455 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/b26a9d979165460081f4c70fbf1c85f1 is 50, key is test_row_0/C:col10/1732141446432/Put/seqid=0 2024-11-20T22:24:09,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742008_1184 (size=12151) 2024-11-20T22:24:09,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T22:24:09,861 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/b26a9d979165460081f4c70fbf1c85f1 2024-11-20T22:24:09,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/62e4fc90926941c6b7c78fa3b2e6f05b as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/62e4fc90926941c6b7c78fa3b2e6f05b 2024-11-20T22:24:09,875 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/62e4fc90926941c6b7c78fa3b2e6f05b, entries=150, sequenceid=248, filesize=30.4 K 2024-11-20T22:24:09,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/a88e9b77b57f4fcf8530ac73c5269ad4 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/a88e9b77b57f4fcf8530ac73c5269ad4 2024-11-20T22:24:09,881 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/a88e9b77b57f4fcf8530ac73c5269ad4, entries=150, sequenceid=248, filesize=11.9 K 2024-11-20T22:24:09,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/b26a9d979165460081f4c70fbf1c85f1 as 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/b26a9d979165460081f4c70fbf1c85f1 2024-11-20T22:24:09,888 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/b26a9d979165460081f4c70fbf1c85f1, entries=150, sequenceid=248, filesize=11.9 K 2024-11-20T22:24:09,889 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 328fe2cc950802b391cb9dd7043a44b7 in 1352ms, sequenceid=248, compaction requested=true 2024-11-20T22:24:09,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:09,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:09,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-20T22:24:09,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-20T22:24:09,891 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-20T22:24:09,891 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5070 sec 2024-11-20T22:24:09,892 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 1.5140 sec 2024-11-20T22:24:09,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:09,902 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 328fe2cc950802b391cb9dd7043a44b7 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T22:24:09,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=A 2024-11-20T22:24:09,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:09,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=B 2024-11-20T22:24:09,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:09,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=C 2024-11-20T22:24:09,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:09,910 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112048efb5daa3d94aa7b00e27a5174011f7_328fe2cc950802b391cb9dd7043a44b7 is 50, key is test_row_0/A:col10/1732141448760/Put/seqid=0 2024-11-20T22:24:09,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742009_1185 (size=14994) 2024-11-20T22:24:09,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:09,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141509937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:10,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:10,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141510044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:10,259 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:10,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141510255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:10,320 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:10,333 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112048efb5daa3d94aa7b00e27a5174011f7_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112048efb5daa3d94aa7b00e27a5174011f7_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:10,334 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/d14f33e6ede6459a98505efde89405c5, store: [table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:10,334 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/d14f33e6ede6459a98505efde89405c5 is 175, key is test_row_0/A:col10/1732141448760/Put/seqid=0 2024-11-20T22:24:10,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742010_1186 (size=39949) 2024-11-20T22:24:10,349 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=274, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/d14f33e6ede6459a98505efde89405c5 2024-11-20T22:24:10,364 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/00b46b9194894ccc8f00b711d16fc1c6 is 50, key is test_row_0/B:col10/1732141448760/Put/seqid=0 2024-11-20T22:24:10,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742011_1187 
(size=12301) 2024-11-20T22:24:10,415 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/00b46b9194894ccc8f00b711d16fc1c6 2024-11-20T22:24:10,462 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/d37774bf6881493cbaaf1e139b7fa1e7 is 50, key is test_row_0/C:col10/1732141448760/Put/seqid=0 2024-11-20T22:24:10,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T22:24:10,488 INFO [Thread-596 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-20T22:24:10,489 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:10,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-20T22:24:10,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T22:24:10,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742012_1188 (size=12301) 2024-11-20T22:24:10,505 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:10,508 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/d37774bf6881493cbaaf1e139b7fa1e7 2024-11-20T22:24:10,510 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:10,511 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:10,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/d14f33e6ede6459a98505efde89405c5 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d14f33e6ede6459a98505efde89405c5 2024-11-20T22:24:10,525 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d14f33e6ede6459a98505efde89405c5, entries=200, sequenceid=274, filesize=39.0 K 2024-11-20T22:24:10,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/00b46b9194894ccc8f00b711d16fc1c6 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/00b46b9194894ccc8f00b711d16fc1c6 2024-11-20T22:24:10,536 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/00b46b9194894ccc8f00b711d16fc1c6, entries=150, sequenceid=274, filesize=12.0 K 2024-11-20T22:24:10,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/d37774bf6881493cbaaf1e139b7fa1e7 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/d37774bf6881493cbaaf1e139b7fa1e7 2024-11-20T22:24:10,548 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/d37774bf6881493cbaaf1e139b7fa1e7, entries=150, sequenceid=274, filesize=12.0 K 2024-11-20T22:24:10,550 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 328fe2cc950802b391cb9dd7043a44b7 in 648ms, sequenceid=274, compaction requested=true 2024-11-20T22:24:10,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:10,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 328fe2cc950802b391cb9dd7043a44b7:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:10,550 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T22:24:10,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:10,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 328fe2cc950802b391cb9dd7043a44b7:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:10,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:10,550 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T22:24:10,550 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 328fe2cc950802b391cb9dd7043a44b7:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:10,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:10,555 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 173457 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T22:24:10,556 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 328fe2cc950802b391cb9dd7043a44b7/A is initiating minor compaction (all files) 2024-11-20T22:24:10,556 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 328fe2cc950802b391cb9dd7043a44b7/A in TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:10,556 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/6a28d226e77c4c1498f2fe6d1e5372f7, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/eb856bc4ce7449bbaaa4a09e1647ec9f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/9705a940649d422a90c54b1acbe92f69, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/62e4fc90926941c6b7c78fa3b2e6f05b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d14f33e6ede6459a98505efde89405c5] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp, totalSize=169.4 K 2024-11-20T22:24:10,556 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:10,556 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/6a28d226e77c4c1498f2fe6d1e5372f7, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/eb856bc4ce7449bbaaa4a09e1647ec9f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/9705a940649d422a90c54b1acbe92f69, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/62e4fc90926941c6b7c78fa3b2e6f05b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d14f33e6ede6459a98505efde89405c5] 2024-11-20T22:24:10,557 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 61349 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T22:24:10,557 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 328fe2cc950802b391cb9dd7043a44b7/B is initiating minor compaction (all files) 2024-11-20T22:24:10,558 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 328fe2cc950802b391cb9dd7043a44b7/B in TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:10,558 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/56f8fbb3ea824892b806d20eb82508f3, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/022be98f04844cd892e0718ac553831a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/49b8d6e94d8544cab1d06b2ea729dc61, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/a88e9b77b57f4fcf8530ac73c5269ad4, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/00b46b9194894ccc8f00b711d16fc1c6] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp, totalSize=59.9 K 2024-11-20T22:24:10,559 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56f8fbb3ea824892b806d20eb82508f3, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732141443101 2024-11-20T22:24:10,559 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a28d226e77c4c1498f2fe6d1e5372f7, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732141443101 2024-11-20T22:24:10,559 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 022be98f04844cd892e0718ac553831a, keycount=150, bloomtype=ROW, 
size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732141444130 2024-11-20T22:24:10,560 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting eb856bc4ce7449bbaaa4a09e1647ec9f, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732141444130 2024-11-20T22:24:10,560 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49b8d6e94d8544cab1d06b2ea729dc61, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732141445008 2024-11-20T22:24:10,560 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 9705a940649d422a90c54b1acbe92f69, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732141445008 2024-11-20T22:24:10,561 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting a88e9b77b57f4fcf8530ac73c5269ad4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732141446414 2024-11-20T22:24:10,561 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 62e4fc90926941c6b7c78fa3b2e6f05b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732141446414 2024-11-20T22:24:10,564 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 00b46b9194894ccc8f00b711d16fc1c6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732141448696 2024-11-20T22:24:10,564 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting d14f33e6ede6459a98505efde89405c5, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732141448696 2024-11-20T22:24:10,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:10,566 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 328fe2cc950802b391cb9dd7043a44b7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:24:10,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=A 2024-11-20T22:24:10,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:10,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=B 2024-11-20T22:24:10,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:10,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=C 2024-11-20T22:24:10,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:10,595 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:10,601 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T22:24:10,601 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 328fe2cc950802b391cb9dd7043a44b7#B#compaction#154 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:10,602 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/c4f11816b0024aaaa79d06b8346c252d is 50, key is test_row_0/B:col10/1732141448760/Put/seqid=0 2024-11-20T22:24:10,608 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f8747207ad064ef38a055e01a9ba1800_328fe2cc950802b391cb9dd7043a44b7 is 50, key is test_row_0/A:col10/1732141450563/Put/seqid=0 2024-11-20T22:24:10,619 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120d2a449a603094e06a66f5c8eccc5a0c9_328fe2cc950802b391cb9dd7043a44b7 store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:10,622 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120d2a449a603094e06a66f5c8eccc5a0c9_328fe2cc950802b391cb9dd7043a44b7, store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:10,622 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d2a449a603094e06a66f5c8eccc5a0c9_328fe2cc950802b391cb9dd7043a44b7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:10,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742013_1189 (size=12915) 2024-11-20T22:24:10,660 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/c4f11816b0024aaaa79d06b8346c252d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/c4f11816b0024aaaa79d06b8346c252d 2024-11-20T22:24:10,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742014_1190 (size=12454) 2024-11-20T22:24:10,668 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 328fe2cc950802b391cb9dd7043a44b7/B of 328fe2cc950802b391cb9dd7043a44b7 into c4f11816b0024aaaa79d06b8346c252d(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
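The compaction entries above show ExploringCompactionPolicy selecting all five eligible store files for 328fe2cc950802b391cb9dd7043a44b7/B and PressureAwareThroughputController capping the rewrite at 50.00 MB/second, with 16 blocking store files. As a rough sketch only (not part of this run), these are the standard HBase configuration keys that drive that selection; the values below are illustrative placeholders, not the settings this test actually used.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: standard keys consulted when a store decides whether to
// compact ("Selecting compaction from 5 store files, ... 16 blocking").
// The values here are placeholders, not the ones used by this test run.
public class CompactionTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // files needed before a minor compaction is considered
    conf.setInt("hbase.hstore.compaction.max", 10);       // upper bound on files rewritten in one pass
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // ratio ExploringCompactionPolicy uses when scoring permutations
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" figure in the log
    return conf;
  }
}
```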
2024-11-20T22:24:10,668 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:10,668 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., storeName=328fe2cc950802b391cb9dd7043a44b7/B, priority=11, startTime=1732141450550; duration=0sec 2024-11-20T22:24:10,668 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:10,668 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 328fe2cc950802b391cb9dd7043a44b7:B 2024-11-20T22:24:10,669 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T22:24:10,669 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:10,673 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:10,673 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T22:24:10,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:10,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:10,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:10,675 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:10,675 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 61349 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T22:24:10,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:10,675 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 328fe2cc950802b391cb9dd7043a44b7/C is initiating minor compaction (all files) 2024-11-20T22:24:10,675 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 328fe2cc950802b391cb9dd7043a44b7/C in TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
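The pid=55/56 flush procedures above are driven by a client-side table flush like the one recorded earlier for Thread-596 ("Operation: FLUSH, Table Name: default:TestAcidGuarantees"). A minimal sketch of that kind of call follows; it assumes a reachable cluster configured via hbase-site.xml, and the "NOT flushing ... as already flushing" lines simply mean the subprocedure is redispatched until the in-progress flush completes.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch of the client call behind the FlushTableProcedure entries
// ("Client=jenkins ... flush TestAcidGuarantees"). Cluster settings are
// assumed to come from hbase-site.xml on the classpath.
public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side flush procedure finishes; if the region
      // is already flushing, the subprocedure is redispatched, which is what
      // the "NOT flushing ... as already flushing" lines above reflect.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```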
2024-11-20T22:24:10,675 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/87126edc615d4d70ad0cd4c400931696, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/83e06f7901a44a2fae91d7ca83a21f0e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/79977a90708f49dfb3bd70caa2dd48b6, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/b26a9d979165460081f4c70fbf1c85f1, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/d37774bf6881493cbaaf1e139b7fa1e7] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp, totalSize=59.9 K 2024-11-20T22:24:10,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:10,676 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87126edc615d4d70ad0cd4c400931696, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732141443101 2024-11-20T22:24:10,677 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 83e06f7901a44a2fae91d7ca83a21f0e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732141444130 2024-11-20T22:24:10,679 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79977a90708f49dfb3bd70caa2dd48b6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732141445008 2024-11-20T22:24:10,679 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f8747207ad064ef38a055e01a9ba1800_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f8747207ad064ef38a055e01a9ba1800_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:10,679 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting b26a9d979165460081f4c70fbf1c85f1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732141446414 2024-11-20T22:24:10,681 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting d37774bf6881493cbaaf1e139b7fa1e7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732141448696 2024-11-20T22:24:10,684 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/d1142fa1075c4d8b9f4ee65bd29b173f, store: [table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:10,685 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/d1142fa1075c4d8b9f4ee65bd29b173f is 175, key is test_row_0/A:col10/1732141450563/Put/seqid=0 2024-11-20T22:24:10,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742015_1191 (size=4469) 2024-11-20T22:24:10,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:10,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141510706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:10,722 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 328fe2cc950802b391cb9dd7043a44b7#C#compaction#156 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:10,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742016_1192 (size=31255) 2024-11-20T22:24:10,723 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/0201b03ebc844a6dbda74275e2272442 is 50, key is test_row_0/C:col10/1732141448760/Put/seqid=0 2024-11-20T22:24:10,724 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=285, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/d1142fa1075c4d8b9f4ee65bd29b173f 2024-11-20T22:24:10,747 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/f10803312a994b8c9136aad32bd1cbe8 is 50, key is test_row_0/B:col10/1732141450563/Put/seqid=0 2024-11-20T22:24:10,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742017_1193 (size=12915) 2024-11-20T22:24:10,779 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/0201b03ebc844a6dbda74275e2272442 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/0201b03ebc844a6dbda74275e2272442 2024-11-20T22:24:10,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742018_1194 (size=12301) 2024-11-20T22:24:10,786 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/f10803312a994b8c9136aad32bd1cbe8 2024-11-20T22:24:10,792 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 328fe2cc950802b391cb9dd7043a44b7/C of 328fe2cc950802b391cb9dd7043a44b7 into 0201b03ebc844a6dbda74275e2272442(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
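The repeated RegionTooBusyException entries ("Over memstore limit=512.0 K") are backpressure: writes to 328fe2cc950802b391cb9dd7043a44b7 are rejected until the in-flight flush frees memstore space. The stock HBase client retries this exception on its own; the sketch below only illustrates an explicit, bounded retry around Table.put, assuming the exception reaches the caller directly rather than wrapped by the client's own retry machinery. Backoff values and limits are placeholders.

```java
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

// Illustrative bounded retry around a put that hits memstore backpressure
// ("RegionTooBusyException: Over memstore limit"). The stock client already
// retries this internally; this sketch assumes the exception surfaces to the
// caller (e.g. after the client's own retries are exhausted).
public class BusyRegionRetrySketch {
  static void putWithBackoff(Table table, Put put) throws Exception {
    long backoffMs = 100;                        // placeholder starting backoff
    for (int attempt = 0; attempt < 5; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        Thread.sleep(backoffMs);                 // let the in-flight flush drain the memstore
        backoffMs *= 2;
      }
    }
    throw new RuntimeException("region still too busy after bounded retries");
  }
}
```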
2024-11-20T22:24:10,792 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:10,792 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., storeName=328fe2cc950802b391cb9dd7043a44b7/C, priority=11, startTime=1732141450550; duration=0sec 2024-11-20T22:24:10,792 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:10,792 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 328fe2cc950802b391cb9dd7043a44b7:C 2024-11-20T22:24:10,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T22:24:10,810 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:10,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141510809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:10,832 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:10,833 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T22:24:10,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:10,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:10,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:10,835 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
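The 512.0 K figure in these RegionTooBusyException messages is the per-region blocking threshold: hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier (default 4). A flush size of 128 KB would produce exactly this limit, but that value is an assumption; the test's actual settings are not visible in this excerpt. The sketch below just shows the arithmetic.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Shows how the blocking threshold in "Over memstore limit=512.0 K" is formed:
// per-region flush size * block multiplier. The 128 KB flush size is an
// assumption chosen so that 128 KB * 4 = 512 KB matches the log; the test's
// real configuration is not visible in this excerpt.
public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);   // assumed test-scale value
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("writes block above " + (flushSize * multiplier) + " bytes"); // 524288
  }
}
```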
2024-11-20T22:24:10,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:10,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:10,847 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/4576c64870e74a838bc4c557c35ddb6e is 50, key is test_row_0/C:col10/1732141450563/Put/seqid=0 2024-11-20T22:24:10,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742019_1195 (size=12301) 2024-11-20T22:24:10,864 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/4576c64870e74a838bc4c557c35ddb6e 2024-11-20T22:24:10,903 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/d1142fa1075c4d8b9f4ee65bd29b173f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d1142fa1075c4d8b9f4ee65bd29b173f 2024-11-20T22:24:10,923 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d1142fa1075c4d8b9f4ee65bd29b173f, entries=150, sequenceid=285, filesize=30.5 K 2024-11-20T22:24:10,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/f10803312a994b8c9136aad32bd1cbe8 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/f10803312a994b8c9136aad32bd1cbe8 2024-11-20T22:24:10,967 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/f10803312a994b8c9136aad32bd1cbe8, entries=150, sequenceid=285, filesize=12.0 K 2024-11-20T22:24:10,977 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/4576c64870e74a838bc4c557c35ddb6e as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/4576c64870e74a838bc4c557c35ddb6e 2024-11-20T22:24:10,987 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:10,988 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/4576c64870e74a838bc4c557c35ddb6e, entries=150, sequenceid=285, filesize=12.0 K 2024-11-20T22:24:10,990 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 328fe2cc950802b391cb9dd7043a44b7 in 424ms, sequenceid=285, compaction requested=false 2024-11-20T22:24:10,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:10,991 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T22:24:10,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:10,993 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 328fe2cc950802b391cb9dd7043a44b7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:24:10,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=A 2024-11-20T22:24:10,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:10,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=B 2024-11-20T22:24:10,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:10,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=C 2024-11-20T22:24:10,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:11,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:11,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
as already flushing 2024-11-20T22:24:11,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120924110d847a342d9958379053c428b62_328fe2cc950802b391cb9dd7043a44b7 is 50, key is test_row_0/A:col10/1732141450686/Put/seqid=0 2024-11-20T22:24:11,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742020_1196 (size=12454) 2024-11-20T22:24:11,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:11,055 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120924110d847a342d9958379053c428b62_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120924110d847a342d9958379053c428b62_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:11,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/771a15a1c280470eafa06918eb1403b0, store: [table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:11,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/771a15a1c280470eafa06918eb1403b0 is 175, key is test_row_0/A:col10/1732141450686/Put/seqid=0 2024-11-20T22:24:11,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:11,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141511069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:11,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742021_1197 (size=31255) 2024-11-20T22:24:11,075 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=312, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/771a15a1c280470eafa06918eb1403b0 2024-11-20T22:24:11,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/43e1289798844d9e9a518cd48c3c657b is 50, key is test_row_0/B:col10/1732141450686/Put/seqid=0 2024-11-20T22:24:11,096 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 328fe2cc950802b391cb9dd7043a44b7#A#compaction#153 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:11,096 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/78fbe6b3f2b64f649ed9a4881fd9f7bb is 175, key is test_row_0/A:col10/1732141448760/Put/seqid=0 2024-11-20T22:24:11,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T22:24:11,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742022_1198 (size=12301) 2024-11-20T22:24:11,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742023_1199 (size=31869) 2024-11-20T22:24:11,146 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/78fbe6b3f2b64f649ed9a4881fd9f7bb as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/78fbe6b3f2b64f649ed9a4881fd9f7bb 2024-11-20T22:24:11,152 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 328fe2cc950802b391cb9dd7043a44b7/A of 328fe2cc950802b391cb9dd7043a44b7 into 78fbe6b3f2b64f649ed9a4881fd9f7bb(size=31.1 K), total size for store is 61.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:11,153 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:11,153 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., storeName=328fe2cc950802b391cb9dd7043a44b7/A, priority=11, startTime=1732141450550; duration=0sec 2024-11-20T22:24:11,153 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:11,153 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 328fe2cc950802b391cb9dd7043a44b7:A 2024-11-20T22:24:11,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:11,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141511176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:11,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:11,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38184 deadline: 1732141511329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:11,331 DEBUG [Thread-592 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8197 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., hostname=6365a1e51efd,44631,1732141399950, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:11,378 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:11,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141511377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:11,525 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/43e1289798844d9e9a518cd48c3c657b 2024-11-20T22:24:11,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/05b7b93bd86e457f857dcd0f7ac290cb is 50, key is test_row_0/C:col10/1732141450686/Put/seqid=0 2024-11-20T22:24:11,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742024_1200 (size=12301) 2024-11-20T22:24:11,564 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/05b7b93bd86e457f857dcd0f7ac290cb 2024-11-20T22:24:11,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/771a15a1c280470eafa06918eb1403b0 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/771a15a1c280470eafa06918eb1403b0 2024-11-20T22:24:11,574 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/771a15a1c280470eafa06918eb1403b0, entries=150, sequenceid=312, filesize=30.5 K 2024-11-20T22:24:11,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/43e1289798844d9e9a518cd48c3c657b as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/43e1289798844d9e9a518cd48c3c657b 2024-11-20T22:24:11,585 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/43e1289798844d9e9a518cd48c3c657b, entries=150, sequenceid=312, filesize=12.0 K 2024-11-20T22:24:11,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/05b7b93bd86e457f857dcd0f7ac290cb as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/05b7b93bd86e457f857dcd0f7ac290cb 2024-11-20T22:24:11,593 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/05b7b93bd86e457f857dcd0f7ac290cb, entries=150, sequenceid=312, filesize=12.0 K 2024-11-20T22:24:11,594 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 328fe2cc950802b391cb9dd7043a44b7 in 601ms, sequenceid=312, compaction requested=true 2024-11-20T22:24:11,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:11,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
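The entries above trace a complete flush of region 328fe2cc950802b391cb9dd7043a44b7: each of the three column families (A, B, C) is written to a temporary HFile under .tmp, committed into its store, and the flush status journal is closed before pid=56 is reported back to the master. The flush itself was requested through the Admin API (the FLUSH operation for procId 55 completes just below). For reference only, the following is a minimal sketch, assuming a stock HBase 2.x client with hbase-site.xml on the classpath, of how such a table flush is requested; the class name and setup are illustrative and are not taken from the test code behind this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Picks up hbase-site.xml / the ZooKeeper quorum from the classpath (assumed to point at the cluster).
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Synchronous flush request for the whole table; on this build the master runs it
      // as a FlushTableProcedure with one FlushRegionProcedure per region, which is
      // what the pid=55/pid=56 pair recorded above corresponds to.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}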
2024-11-20T22:24:11,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-20T22:24:11,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-20T22:24:11,597 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-20T22:24:11,597 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0850 sec 2024-11-20T22:24:11,599 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 1.1080 sec 2024-11-20T22:24:11,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T22:24:11,604 INFO [Thread-596 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-20T22:24:11,605 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:11,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-20T22:24:11,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T22:24:11,606 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:11,607 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:11,607 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:11,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:11,682 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 328fe2cc950802b391cb9dd7043a44b7 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T22:24:11,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=A 2024-11-20T22:24:11,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:11,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=B 2024-11-20T22:24:11,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
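The repeated "Over memstore limit=512.0 K" RegionTooBusyException warnings in this stretch come from HRegion.checkResources blocking new writes once the region's memstore exceeds its blocking size, which is the per-region flush size multiplied by hbase.hregion.memstore.block.multiplier. The snippet below is a hypothetical configuration sketch that would yield a 512 K blocking limit; the concrete values are assumptions chosen for illustration and are not read from this test's actual settings.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it holds this much data (128 K assumed here purely
    // for illustration; the production default is much larger).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    // New writes are rejected with RegionTooBusyException once the memstore grows past
    // flush.size * multiplier, i.e. 4 * 128 K = 512 K in this sketch.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Writes block above roughly " + blockingLimit + " bytes per region");
  }
}

Writers that hit this limit are the ones seen blocking and retrying in the surrounding entries until the in-flight flush drains the memstore.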
2024-11-20T22:24:11,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=C 2024-11-20T22:24:11,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:11,695 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120017ead8f706c4e118a3a15c4efe3993c_328fe2cc950802b391cb9dd7043a44b7 is 50, key is test_row_0/A:col10/1732141451068/Put/seqid=0 2024-11-20T22:24:11,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742025_1201 (size=14994) 2024-11-20T22:24:11,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T22:24:11,760 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:11,760 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:11,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:11,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:11,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:11,761 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:11,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:11,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
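As the client-side stack trace earlier in this section shows (HTable.put -> RpcRetryingCallerImpl.callWithRetries), RegionTooBusyException is surfaced to the blocking client as a retryable failure: the "tries=7, retries=16" entry is the caller re-attempting the same Put with backoff while the flush frees memstore space. The sketch below is a minimal, hypothetical writer that relies on that built-in retry loop; the property values and names are illustrative assumptions rather than the test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Client retry budget and base backoff; 16 matches the retries=16 seen in the
    // RpcRetryingCallerImpl entry above, the pause value is an illustrative assumption.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100L);
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // table.put() goes through RpcRetryingCallerImpl: a RegionTooBusyException from
      // the server is treated as retryable, so the call is re-attempted with backoff
      // until it succeeds or the retry budget is exhausted.
      table.put(put);
    }
  }
}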
2024-11-20T22:24:11,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141511759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:11,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:11,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:11,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141511863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:11,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T22:24:11,913 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:11,914 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:11,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:11,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:11,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:11,914 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:11,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:11,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:12,066 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:12,066 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:12,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:12,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:12,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:12,066 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:12,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:12,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:12,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:12,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141512068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:12,102 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:12,106 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120017ead8f706c4e118a3a15c4efe3993c_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120017ead8f706c4e118a3a15c4efe3993c_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:12,107 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/a883a888924a4b1fa55e773fc9a88533, store: [table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:12,107 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/a883a888924a4b1fa55e773fc9a88533 is 175, key is test_row_0/A:col10/1732141451068/Put/seqid=0 2024-11-20T22:24:12,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742026_1202 (size=39949) 2024-11-20T22:24:12,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=57 2024-11-20T22:24:12,218 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:12,219 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:12,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:12,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:12,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:12,219 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:12,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
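The block above repeats one pattern: the master re-dispatches the remote flush procedure (pid=58) to 6365a1e51efd,44631, the region server finds the region already mid-flush ("NOT flushing ... as already flushing"), FlushRegionCallable fails with IOException "Unable to complete flush", the failure is reported back, and the procedure is dispatched again roughly 150 ms later. The following is a minimal, self-contained Java sketch of that retry loop, for illustration only; the class and method names (FlushRetrySketch, dispatchFlush) are hypothetical and are not HBase APIs.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

public class FlushRetrySketch {
    // Stands in for the region's "already flushing" state reported by HRegion(2496).
    static final AtomicBoolean alreadyFlushing = new AtomicBoolean(true);

    // Stands in for FlushRegionCallable.doCall(): fail fast while a flush is in progress.
    static void dispatchFlush(String region) throws IOException {
        if (alreadyFlushing.get()) {
            throw new IOException("Unable to complete flush " + region);
        }
        System.out.println("flushed " + region);
    }

    public static void main(String[] args) throws InterruptedException {
        String region = "328fe2cc950802b391cb9dd7043a44b7";
        for (int attempt = 1; attempt <= 10; attempt++) {
            try {
                dispatchFlush(region);   // the dispatcher keeps re-sending the same pid
                break;
            } catch (IOException e) {
                System.out.println("attempt " + attempt + " failed: " + e.getMessage());
                Thread.sleep(150);       // back off, mirroring the ~150 ms re-dispatch gaps in the log
                if (attempt == 5) {
                    alreadyFlushing.set(false);  // simulate the in-flight flush finishing
                }
            }
        }
    }
}

Once the in-flight flush completes, a subsequent re-dispatch of the same procedure can succeed, which is what eventually ends this kind of loop.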
2024-11-20T22:24:12,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:12,371 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:12,371 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:12,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:12,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
as already flushing 2024-11-20T22:24:12,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:12,371 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:12,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:12,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:12,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:12,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141512372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:12,523 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:12,524 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:12,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:12,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:12,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:12,524 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:12,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:12,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:12,526 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=325, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/a883a888924a4b1fa55e773fc9a88533 2024-11-20T22:24:12,533 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/3a2effc828954764895ff39fdf0558aa is 50, key is test_row_0/B:col10/1732141451068/Put/seqid=0 2024-11-20T22:24:12,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742027_1203 (size=12301) 2024-11-20T22:24:12,676 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:12,677 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:12,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:12,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:12,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:12,677 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
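For context on the interleaved MemStoreFlusher and "Checking to see if procedure is done pid=57" lines: the MemStoreFlusher entries show the in-progress flush writing per-family temp files (family A via the mob path, then B and C), while the pid=57 checks look like a client polling a table flush it submitted. A minimal sketch of issuing such a flush through the standard Admin API is below, as an assumption about how the request originates; only the table name comes from the log, the connection setup is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Synchronous from the caller's point of view: the client submits the flush
            // and polls the master until the procedure reports done, which would produce
            // the "Checking to see if procedure is done" traffic seen above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}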
2024-11-20T22:24:12,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:12,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:12,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T22:24:12,829 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:12,829 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:12,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:12,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:12,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:12,829 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:12,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:12,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:12,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:12,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141512876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:12,937 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/3a2effc828954764895ff39fdf0558aa 2024-11-20T22:24:12,944 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/0ee5862b859341d0b89b7c9f070ea173 is 50, key is test_row_0/C:col10/1732141451068/Put/seqid=0 2024-11-20T22:24:12,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742028_1204 (size=12301) 2024-11-20T22:24:12,981 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:12,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:12,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:12,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
as already flushing 2024-11-20T22:24:12,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:12,982 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:12,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:12,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:13,134 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:13,134 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:13,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:13,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:13,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:13,134 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:13,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:13,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:13,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:13,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38152 deadline: 1732141513278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:13,279 DEBUG [Thread-588 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8209 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., hostname=6365a1e51efd,44631,1732141399950, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:13,286 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:13,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:13,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
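
The RegionTooBusyException and retry lines above show the client's RpcRetryingCallerImpl backing off while the region's memstore sits over its blocking limit (512.0 K in this run; the limit is roughly hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, which this test configures far below the defaults). A minimal Java sketch of the client-side retry knobs involved, assuming only the standard HBase client API; the property values and the cell value are illustrative, not read from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryTuningSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Client-side backoff while the region keeps reporting RegionTooBusyException.
        // Values are illustrative; 16 matches the retries=16 seen in this log.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100);  // base pause in ms, backed off per attempt
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_2"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some-value"));
          // RpcRetryingCallerImpl retries RegionTooBusyException internally, which is
          // exactly the "tries=7, retries=16" churn visible above.
          table.put(put);
        }
      }
    }

With enough retries and a long enough pause the put normally goes through once a flush frees memstore space, as happens later in this log.
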
2024-11-20T22:24:13,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:13,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:13,287 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:13,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
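
The pid=58 failure above is the remote FlushRegionCallable refusing to flush a region that is already flushing; the master simply sees the procedure fail and re-dispatches it (as it does again further down). A hedged sketch of the same retry-until-flushable pattern from the client side, using the public Admin API; the table name, retry count, and backoff are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRetrySketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              admin.flush(tn);  // can fail with IOException while another flush is in progress
              break;
            } catch (IOException e) {
              Thread.sleep(1000L * attempt);  // simple linear backoff before trying again
            }
          }
        }
      }
    }
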
2024-11-20T22:24:13,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:13,291 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:13,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38164 deadline: 1732141513289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:13,292 DEBUG [Thread-586 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8223 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., hostname=6365a1e51efd,44631,1732141399950, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:13,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:13,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38180 deadline: 1732141513296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:13,297 DEBUG [Thread-594 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8217 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., hostname=6365a1e51efd,44631,1732141399950, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:13,354 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=325 (bloomFilter=true), 
to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/0ee5862b859341d0b89b7c9f070ea173 2024-11-20T22:24:13,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/a883a888924a4b1fa55e773fc9a88533 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/a883a888924a4b1fa55e773fc9a88533 2024-11-20T22:24:13,364 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/a883a888924a4b1fa55e773fc9a88533, entries=200, sequenceid=325, filesize=39.0 K 2024-11-20T22:24:13,365 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/3a2effc828954764895ff39fdf0558aa as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/3a2effc828954764895ff39fdf0558aa 2024-11-20T22:24:13,369 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/3a2effc828954764895ff39fdf0558aa, entries=150, sequenceid=325, filesize=12.0 K 2024-11-20T22:24:13,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/0ee5862b859341d0b89b7c9f070ea173 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/0ee5862b859341d0b89b7c9f070ea173 2024-11-20T22:24:13,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/0ee5862b859341d0b89b7c9f070ea173, entries=150, sequenceid=325, filesize=12.0 K 2024-11-20T22:24:13,375 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 328fe2cc950802b391cb9dd7043a44b7 in 1694ms, sequenceid=325, compaction requested=true 2024-11-20T22:24:13,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:13,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 328fe2cc950802b391cb9dd7043a44b7:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:13,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:13,376 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:13,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 328fe2cc950802b391cb9dd7043a44b7:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:13,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:13,376 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:13,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 328fe2cc950802b391cb9dd7043a44b7:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:13,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:13,378 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 134328 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:13,378 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:13,378 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 328fe2cc950802b391cb9dd7043a44b7/A is initiating minor compaction (all files) 2024-11-20T22:24:13,378 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 328fe2cc950802b391cb9dd7043a44b7/B is initiating minor compaction (all files) 2024-11-20T22:24:13,378 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 328fe2cc950802b391cb9dd7043a44b7/B in TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:13,378 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 328fe2cc950802b391cb9dd7043a44b7/A in TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
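
The selection lines above come from ExploringCompactionPolicy picking 4 eligible store files per family, with 16 as the blocking file count. The sketch below names the store-compaction properties behind those numbers and requests a compaction explicitly. Note these are server-side settings (hbase-site.xml or the table/family descriptor); they are set on a client Configuration here only to show the key names, and the values are the usual defaults rather than anything read from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Selection knobs, listed only to name the keys (they take effect on the server).
        conf.setInt("hbase.hstore.compaction.min", 3);      // files needed before a minor compaction
        conf.setInt("hbase.hstore.compaction.max", 10);     // most files folded into one compaction
        conf.setInt("hbase.hstore.blockingStoreFiles", 16); // the "16 blocking" reported above
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          admin.compact(tn);  // request a (minor) compaction instead of waiting for the flusher
          System.out.println("compaction state: " + admin.getCompactionState(tn));
        }
      }
    }
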
2024-11-20T22:24:13,378 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/c4f11816b0024aaaa79d06b8346c252d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/f10803312a994b8c9136aad32bd1cbe8, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/43e1289798844d9e9a518cd48c3c657b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/3a2effc828954764895ff39fdf0558aa] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp, totalSize=48.7 K 2024-11-20T22:24:13,378 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/78fbe6b3f2b64f649ed9a4881fd9f7bb, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d1142fa1075c4d8b9f4ee65bd29b173f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/771a15a1c280470eafa06918eb1403b0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/a883a888924a4b1fa55e773fc9a88533] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp, totalSize=131.2 K 2024-11-20T22:24:13,378 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:13,378 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/78fbe6b3f2b64f649ed9a4881fd9f7bb, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d1142fa1075c4d8b9f4ee65bd29b173f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/771a15a1c280470eafa06918eb1403b0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/a883a888924a4b1fa55e773fc9a88533] 2024-11-20T22:24:13,379 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting c4f11816b0024aaaa79d06b8346c252d, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732141448696 2024-11-20T22:24:13,379 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78fbe6b3f2b64f649ed9a4881fd9f7bb, keycount=150, bloomtype=ROW, size=31.1 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732141448696 2024-11-20T22:24:13,379 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1142fa1075c4d8b9f4ee65bd29b173f, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732141449912 2024-11-20T22:24:13,379 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting f10803312a994b8c9136aad32bd1cbe8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732141449912 2024-11-20T22:24:13,379 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 771a15a1c280470eafa06918eb1403b0, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732141450686 2024-11-20T22:24:13,379 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 43e1289798844d9e9a518cd48c3c657b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732141450686 2024-11-20T22:24:13,380 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a2effc828954764895ff39fdf0558aa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732141451055 2024-11-20T22:24:13,380 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting a883a888924a4b1fa55e773fc9a88533, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732141451055 2024-11-20T22:24:13,390 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:13,405 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 328fe2cc950802b391cb9dd7043a44b7#B#compaction#166 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:13,406 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/965ab00916b64a2e91f4ade8d5d5f8db is 50, key is test_row_0/B:col10/1732141451068/Put/seqid=0 2024-11-20T22:24:13,407 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120df4a4e6a8c31414595b5748cc6eca8c4_328fe2cc950802b391cb9dd7043a44b7 store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:13,409 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120df4a4e6a8c31414595b5748cc6eca8c4_328fe2cc950802b391cb9dd7043a44b7, store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:13,410 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120df4a4e6a8c31414595b5748cc6eca8c4_328fe2cc950802b391cb9dd7043a44b7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:13,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742029_1205 (size=13051) 2024-11-20T22:24:13,419 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/965ab00916b64a2e91f4ade8d5d5f8db as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/965ab00916b64a2e91f4ade8d5d5f8db 2024-11-20T22:24:13,424 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 328fe2cc950802b391cb9dd7043a44b7/B of 328fe2cc950802b391cb9dd7043a44b7 into 965ab00916b64a2e91f4ade8d5d5f8db(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
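
The throughput lines above come from the PressureAwareThroughputController capping compactions at 50.00 MB/second. The key names in the sketch below are assumptions quoted from memory and should be checked against the hbase-default.xml shipped with your release; with no flush pressure the controller sits at the lower bound, which would be consistent with the 50 MB/s limit reported here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // ASSUMED key names -- verify for your HBase version before relying on them.
        // 50 MB/s and 100 MB/s are the commonly documented lower/upper bounds.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        System.out.println(conf.get("hbase.hstore.compaction.throughput.lower.bound"));
      }
    }
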
2024-11-20T22:24:13,425 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:13,425 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., storeName=328fe2cc950802b391cb9dd7043a44b7/B, priority=12, startTime=1732141453376; duration=0sec 2024-11-20T22:24:13,425 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:13,425 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 328fe2cc950802b391cb9dd7043a44b7:B 2024-11-20T22:24:13,425 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:13,426 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:13,426 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 328fe2cc950802b391cb9dd7043a44b7/C is initiating minor compaction (all files) 2024-11-20T22:24:13,426 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 328fe2cc950802b391cb9dd7043a44b7/C in TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:13,426 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/0201b03ebc844a6dbda74275e2272442, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/4576c64870e74a838bc4c557c35ddb6e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/05b7b93bd86e457f857dcd0f7ac290cb, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/0ee5862b859341d0b89b7c9f070ea173] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp, totalSize=48.7 K 2024-11-20T22:24:13,428 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 0201b03ebc844a6dbda74275e2272442, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732141448696 2024-11-20T22:24:13,428 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 4576c64870e74a838bc4c557c35ddb6e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732141449912 2024-11-20T22:24:13,429 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 05b7b93bd86e457f857dcd0f7ac290cb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=312, earliestPutTs=1732141450686 2024-11-20T22:24:13,429 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ee5862b859341d0b89b7c9f070ea173, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732141451055 2024-11-20T22:24:13,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742030_1206 (size=4469) 2024-11-20T22:24:13,433 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 328fe2cc950802b391cb9dd7043a44b7#A#compaction#165 average throughput is 0.57 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:13,434 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/b6c45626d3f142aa9292fe3a697d1f11 is 175, key is test_row_0/A:col10/1732141451068/Put/seqid=0 2024-11-20T22:24:13,439 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:13,439 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:13,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742031_1207 (size=32005) 2024-11-20T22:24:13,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:13,440 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 328fe2cc950802b391cb9dd7043a44b7 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T22:24:13,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=A 2024-11-20T22:24:13,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:13,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=B 2024-11-20T22:24:13,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:13,443 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 328fe2cc950802b391cb9dd7043a44b7#C#compaction#167 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:13,443 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/e03197e5a019458ba29541f4ddb0bb3f is 50, key is test_row_0/C:col10/1732141451068/Put/seqid=0 2024-11-20T22:24:13,449 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/b6c45626d3f142aa9292fe3a697d1f11 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/b6c45626d3f142aa9292fe3a697d1f11 2024-11-20T22:24:13,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=C 2024-11-20T22:24:13,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:13,464 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 328fe2cc950802b391cb9dd7043a44b7/A of 328fe2cc950802b391cb9dd7043a44b7 into b6c45626d3f142aa9292fe3a697d1f11(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:13,464 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:13,464 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., storeName=328fe2cc950802b391cb9dd7043a44b7/A, priority=12, startTime=1732141453376; duration=0sec 2024-11-20T22:24:13,464 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:13,464 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 328fe2cc950802b391cb9dd7043a44b7:A 2024-11-20T22:24:13,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742032_1208 (size=13051) 2024-11-20T22:24:13,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205052c5dafc604a238d79403ba6c6294d_328fe2cc950802b391cb9dd7043a44b7 is 50, key is test_row_0/A:col10/1732141451757/Put/seqid=0 2024-11-20T22:24:13,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742033_1209 (size=12454) 2024-11-20T22:24:13,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,503 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205052c5dafc604a238d79403ba6c6294d_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205052c5dafc604a238d79403ba6c6294d_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:13,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/0eb810fd14bc443b9c6b61e06de71832, store: [table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:13,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/0eb810fd14bc443b9c6b61e06de71832 is 175, key is test_row_0/A:col10/1732141451757/Put/seqid=0 2024-11-20T22:24:13,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742034_1210 (size=31255) 2024-11-20T22:24:13,510 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=350, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/0eb810fd14bc443b9c6b61e06de71832 2024-11-20T22:24:13,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/8b41ea37415d41748fe3d1e71365fd1d is 50, key is test_row_0/B:col10/1732141451757/Put/seqid=0 2024-11-20T22:24:13,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742035_1211 (size=12301) 2024-11-20T22:24:13,525 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=350 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/8b41ea37415d41748fe3d1e71365fd1d 2024-11-20T22:24:13,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/ce17b97556c04d3cadc0badf93da54a7 is 50, key is test_row_0/C:col10/1732141451757/Put/seqid=0 2024-11-20T22:24:13,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742036_1212 (size=12301) 2024-11-20T22:24:13,549 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=350 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/ce17b97556c04d3cadc0badf93da54a7 2024-11-20T22:24:13,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/0eb810fd14bc443b9c6b61e06de71832 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/0eb810fd14bc443b9c6b61e06de71832 2024-11-20T22:24:13,563 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/0eb810fd14bc443b9c6b61e06de71832, entries=150, sequenceid=350, filesize=30.5 K 2024-11-20T22:24:13,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/8b41ea37415d41748fe3d1e71365fd1d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/8b41ea37415d41748fe3d1e71365fd1d 2024-11-20T22:24:13,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,570 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/8b41ea37415d41748fe3d1e71365fd1d, entries=150, sequenceid=350, filesize=12.0 K 2024-11-20T22:24:13,571 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/ce17b97556c04d3cadc0badf93da54a7 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/ce17b97556c04d3cadc0badf93da54a7 2024-11-20T22:24:13,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,576 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/ce17b97556c04d3cadc0badf93da54a7, entries=150, sequenceid=350, filesize=12.0 K 2024-11-20T22:24:13,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,576 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for 328fe2cc950802b391cb9dd7043a44b7 in 136ms, sequenceid=350, compaction requested=false 2024-11-20T22:24:13,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:13,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
2024-11-20T22:24:13,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-11-20T22:24:13,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-11-20T22:24:13,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,579 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-11-20T22:24:13,579 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9700 sec 2024-11-20T22:24:13,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,580 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.9740 sec 2024-11-20T22:24:13,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T22:24:13,710 INFO [Thread-596 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-20T22:24:13,711 
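For context on the FLUSH operation recorded above: the flush is requested through the client-side Admin API and tracked as a master procedure (procId 57 here). The following is only a minimal, illustrative sketch of how a test client could issue such a synchronous flush; the table name and the wait-for-procedure behaviour are taken from this log, while the class name and setup code are illustrative assumptions.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws IOException {
        // Assumes an hbase-site.xml for the test cluster is on the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Synchronous flush of the table's regions; the call returns once the
          // master-side flush procedure has completed, which is what the
          // "Operation: FLUSH ... procId: 57 completed" line above reports.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

Immediately after that call returns, the log shows the master receiving the next flush request for the same table, which is stored as pid=59 below.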
2024-11-20T22:24:13,712 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-20T22:24:13,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees
2024-11-20T22:24:13,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59
2024-11-20T22:24:13,714 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-20T22:24:13,715 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T22:24:13,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
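The StoreFileTrackerFactory lines that dominate this section record each store resolving its store file tracker implementation from configuration, which here resolves to DefaultStoreFileTracker. As a hedged sketch only: the tracker can also be pinned per table when the table is created. The builder calls below are standard HBase client API; the configuration key name, the "DEFAULT" value, and the column family name are assumptions about the store file tracking feature and are not confirmed by this log.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableWithTracker {
      // Assumed key for the store file tracker implementation; verify it against
      // the HBase version in use before relying on it.
      private static final String TRACKER_IMPL_KEY = "hbase.store.file-tracker.impl";

      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableDescriptorBuilder builder =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("ExampleTable"))
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
                  // "DEFAULT" is assumed to select the default tracker, matching the
                  // DefaultStoreFileTracker instances logged in this section; leaving
                  // the key unset falls back to the cluster-wide setting.
                  .setValue(TRACKER_IMPL_KEY, "DEFAULT");
          admin.createTable(builder.build());
        }
      }
    }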
2024-11-20T22:24:13,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T22:24:13,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:24:13,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:24:13,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:24:13,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:24:13,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:24:13,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:24:13,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:24:13,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:24:13,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:24:13,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:24:13,868 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950
2024-11-20T22:24:13,868 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60
2024-11-20T22:24:13,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.
2024-11-20T22:24:13,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 328fe2cc950802b391cb9dd7043a44b7:
2024-11-20T22:24:13,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.
2024-11-20T22:24:13,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60
2024-11-20T22:24:13,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=60
2024-11-20T22:24:13,873 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59
2024-11-20T22:24:13,873 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 156 msec
2024-11-20T22:24:13,876 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 163 msec
2024-11-20T22:24:13,879 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/e03197e5a019458ba29541f4ddb0bb3f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/e03197e5a019458ba29541f4ddb0bb3f
2024-11-20T22:24:13,889 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 328fe2cc950802b391cb9dd7043a44b7/C of 328fe2cc950802b391cb9dd7043a44b7 into e03197e5a019458ba29541f4ddb0bb3f(size=12.7 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T22:24:13,889 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 328fe2cc950802b391cb9dd7043a44b7:
2024-11-20T22:24:13,889 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., storeName=328fe2cc950802b391cb9dd7043a44b7/C, priority=12, startTime=1732141453376; duration=0sec
2024-11-20T22:24:13,889 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T22:24:13,889 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 328fe2cc950802b391cb9dd7043a44b7:C
2024-11-20T22:24:13,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:13,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T22:24:14,016 INFO [Thread-596 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-20T22:24:14,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,019 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:14,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-11-20T22:24:14,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:24:14,021 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-20T22:24:14,021 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T22:24:14,022 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T22:24:14,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61
[2024-11-20T22:24:14,020 through 22:24:14,060: a long run of near-identical DEBUG entries omitted here; RpcServer.default.FPBQ.Fifo.handler=0/1/2 (port=44631) repeatedly log storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker]
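The run of DEBUG entries summarized above is StoreFileTrackerFactory being asked, on the region server's RPC handler threads, to resolve a tracker for a store; with nothing configured it falls back to DefaultStoreFileTracker every time, hence the identical messages. The Java sketch below shows how a table shaped like the one in this log (families A, B and C, with MOB on family A, which is what produces the mobdir paths further down) could be declared through the public client API, with the tracker choice pinned explicitly. It is an illustration only, not the TestAcidGuarantees setup code: the config key hbase.store.file-tracker.impl and the 100-byte MOB threshold are assumptions based on recent HBase 2.x releases.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAcidTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Pin the store file tracker instead of relying on the per-store lookup the
              // factory logs above. "DEFAULT" resolves to DefaultStoreFileTracker; the key
              // name is an assumption taken from StoreFileTrackerFactory in HBase 2.5+.
              .setValue("hbase.store.file-tracker.impl", "DEFAULT")
              // Family A with MOB enabled is what leads to the mobdir/.tmp flush files
              // seen below; the threshold value is illustrative, not taken from the test.
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                  .setMobEnabled(true)
                  .setMobThreshold(100)
                  .build())
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
      admin.createTable(table.build());
    }
  }
}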
2024-11-20T22:24:14,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:24:14,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 328fe2cc950802b391cb9dd7043a44b7
2024-11-20T22:24:14,071 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 328fe2cc950802b391cb9dd7043a44b7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-20T22:24:14,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=A
2024-11-20T22:24:14,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:24:14,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=B
2024-11-20T22:24:14,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:24:14,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=C
2024-11-20T22:24:14,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:24:14,114 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120da3132a356df4876a877d056017afafd_328fe2cc950802b391cb9dd7043a44b7 is 50, key is test_row_0/A:col10/1732141454069/Put/seqid=0
2024-11-20T22:24:14,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61
2024-11-20T22:24:14,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742037_1213 (size=17534)
2024-11-20T22:24:14,132 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:24:14,137 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120da3132a356df4876a877d056017afafd_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120da3132a356df4876a877d056017afafd_328fe2cc950802b391cb9dd7043a44b7
2024-11-20T22:24:14,140 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/818c3e944dee48cb8f2eaa92b8628fc4, store: [table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7]
2024-11-20T22:24:14,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/818c3e944dee48cb8f2eaa92b8628fc4 is 175, key is test_row_0/A:col10/1732141454069/Put/seqid=0
2024-11-20T22:24:14,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742038_1214 (size=48639)
2024-11-20T22:24:14,165 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=364, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/818c3e944dee48cb8f2eaa92b8628fc4
2024-11-20T22:24:14,174 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950
2024-11-20T22:24:14,175 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/5c81fc03693645a5a2252a4f5f8a55cf is 50, key is test_row_0/B:col10/1732141454069/Put/seqid=0
2024-11-20T22:24:14,175 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62
2024-11-20T22:24:14,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.
2024-11-20T22:24:14,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing
2024-11-20T22:24:14,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.
2024-11-20T22:24:14,175 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62
java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
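Two flushes are racing in the entries above: MemStoreFlusher.0 is already writing the memstore out because the region hit its flush threshold (the "Flush requested on 328fe2cc..." line), while pid=62, the FlushRegionProcedure spawned by the table-level FlushTableProcedure pid=61, arrives over RPC and finds the region "already flushing". The resulting IOException is not fatal; it is the region server telling the master to re-dispatch pid=62 later, which is why the same stack trace repeats below until the in-flight flush finishes, and the "Checking to see if procedure is done pid=61" lines are the client polling for completion. A minimal sketch of the client-side calls that produce this pattern, assuming the standard HBase client API and illustrative row/value shapes rather than the actual test harness, follows:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushWhileWriting {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(name);
         Admin admin = conn.getAdmin()) {
      // Steady writes like these are what push the memstore over its flush threshold
      // and trigger the MemStoreFlusher-driven flush seen in the log.
      for (int i = 0; i < 1_000; i++) {
        Put put = new Put(Bytes.toBytes("test_row_" + (i % 10)));
        put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value-" + i));
        table.put(put);
      }
      // An explicit flush request becomes FlushTableProcedure (pid=61 here) on the master,
      // which dispatches FlushRegionProcedure (pid=62) to the region server. If the region
      // is already flushing, the callable fails with the IOException above and the master
      // retries it; this call simply waits while that plays out.
      admin.flush(name);
    }
  }
}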
2024-11-20T22:24:14,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:14,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:14,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:14,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141514186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:14,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742039_1215 (size=12301) 2024-11-20T22:24:14,196 DEBUG [Thread-603 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x768577a2 to 127.0.0.1:51822 2024-11-20T22:24:14,196 DEBUG [Thread-597 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3ec46f90 to 127.0.0.1:51822 2024-11-20T22:24:14,196 DEBUG [Thread-603 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:14,196 DEBUG [Thread-597 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:14,197 DEBUG [Thread-601 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x473f181f to 127.0.0.1:51822 2024-11-20T22:24:14,197 DEBUG [Thread-601 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:14,198 DEBUG [Thread-599 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7f63b68c to 127.0.0.1:51822 2024-11-20T22:24:14,198 DEBUG [Thread-599 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:14,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:14,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141514290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:14,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T22:24:14,328 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:14,328 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T22:24:14,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:14,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:14,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:14,329 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:14,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:14,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:14,480 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:14,481 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T22:24:14,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:14,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:14,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:14,481 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:14,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:14,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:14,493 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:14,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141514493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:14,590 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/5c81fc03693645a5a2252a4f5f8a55cf 2024-11-20T22:24:14,597 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/22804ae344ce42efba55a903b6117ba4 is 50, key is test_row_0/C:col10/1732141454069/Put/seqid=0 2024-11-20T22:24:14,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742040_1216 (size=12301) 2024-11-20T22:24:14,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T22:24:14,634 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:14,634 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T22:24:14,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:14,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:14,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
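The RegionTooBusyException warnings in this stretch ("Over memstore limit=512.0 K") mean the region has reached its blocking memstore size, which is the configured flush size multiplied by hbase.hregion.memstore.block.multiplier; the 512 K figure suggests the test runs with a deliberately tiny flush size so that blocking is easy to hit, though the exact settings are not visible in this excerpt. Writes are rejected until the flush above (sequenceid=364) drains the memstore. The exception is retryable, and the stock client does retry it internally; the sketch below, which is illustrative rather than taken from the test, shows how a caller managing its own retries could back off on it.

import java.io.IOException;

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BackoffPut {

  /** Looks through the cause chain for the "over memstore limit" signal. */
  static boolean isRegionTooBusy(Throwable t) {
    for (Throwable cur = t; cur != null; cur = cur.getCause()) {
      if (cur instanceof RegionTooBusyException) {
        return true;
      }
    }
    return false;
  }

  /** Retries a put with exponential backoff while the region is blocking updates. */
  static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
    long sleepMs = 100;                         // illustrative initial pause
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (IOException e) {
        // Only retry the memstore-pressure case, and only a few times.
        if (!isRegionTooBusy(e) || attempt >= 5) {
          throw e;
        }
        Thread.sleep(sleepMs);                  // give MemStoreFlusher time to drain the region
        sleepMs = Math.min(sleepMs * 2, 2_000); // cap the backoff
      }
    }
  }

  public static void main(String[] args) {
    // A payload shaped like the writes in this log; a real caller would obtain the Table
    // from a Connection and pass it to putWithBackoff together with this Put.
    Put put = new Put(Bytes.toBytes("test_row_0"));
    put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some-value"));
    System.out.println("example Put prepared: " + put);
  }
}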
2024-11-20T22:24:14,635 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:14,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:14,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:14,787 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:14,787 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T22:24:14,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:14,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:14,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:14,788 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:14,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:14,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:14,797 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:14,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38194 deadline: 1732141514796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:14,941 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:14,943 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T22:24:14,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:14,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. as already flushing 2024-11-20T22:24:14,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:14,943 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:14,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:14,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:15,004 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/22804ae344ce42efba55a903b6117ba4 2024-11-20T22:24:15,013 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/818c3e944dee48cb8f2eaa92b8628fc4 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/818c3e944dee48cb8f2eaa92b8628fc4 2024-11-20T22:24:15,019 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/818c3e944dee48cb8f2eaa92b8628fc4, entries=250, sequenceid=364, filesize=47.5 K 2024-11-20T22:24:15,019 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/5c81fc03693645a5a2252a4f5f8a55cf as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/5c81fc03693645a5a2252a4f5f8a55cf 2024-11-20T22:24:15,023 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/5c81fc03693645a5a2252a4f5f8a55cf, entries=150, sequenceid=364, filesize=12.0 K 2024-11-20T22:24:15,024 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/22804ae344ce42efba55a903b6117ba4 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/22804ae344ce42efba55a903b6117ba4 2024-11-20T22:24:15,028 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/22804ae344ce42efba55a903b6117ba4, entries=150, sequenceid=364, filesize=12.0 K 2024-11-20T22:24:15,029 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 328fe2cc950802b391cb9dd7043a44b7 in 958ms, sequenceid=364, compaction requested=true 2024-11-20T22:24:15,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:15,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
328fe2cc950802b391cb9dd7043a44b7:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:15,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:15,029 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:15,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 328fe2cc950802b391cb9dd7043a44b7:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:15,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:15,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 328fe2cc950802b391cb9dd7043a44b7:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:15,029 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:15,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:15,030 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111899 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:15,030 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:15,030 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 328fe2cc950802b391cb9dd7043a44b7/A is initiating minor compaction (all files) 2024-11-20T22:24:15,030 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 328fe2cc950802b391cb9dd7043a44b7/B is initiating minor compaction (all files) 2024-11-20T22:24:15,030 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 328fe2cc950802b391cb9dd7043a44b7/A in TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:15,030 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 328fe2cc950802b391cb9dd7043a44b7/B in TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
2024-11-20T22:24:15,030 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/965ab00916b64a2e91f4ade8d5d5f8db, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/8b41ea37415d41748fe3d1e71365fd1d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/5c81fc03693645a5a2252a4f5f8a55cf] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp, totalSize=36.8 K 2024-11-20T22:24:15,030 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/b6c45626d3f142aa9292fe3a697d1f11, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/0eb810fd14bc443b9c6b61e06de71832, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/818c3e944dee48cb8f2eaa92b8628fc4] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp, totalSize=109.3 K 2024-11-20T22:24:15,030 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:15,030 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/b6c45626d3f142aa9292fe3a697d1f11, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/0eb810fd14bc443b9c6b61e06de71832, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/818c3e944dee48cb8f2eaa92b8628fc4] 2024-11-20T22:24:15,030 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 965ab00916b64a2e91f4ade8d5d5f8db, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732141451055 2024-11-20T22:24:15,031 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6c45626d3f142aa9292fe3a697d1f11, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732141451055 2024-11-20T22:24:15,031 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b41ea37415d41748fe3d1e71365fd1d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1732141451740 2024-11-20T22:24:15,031 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0eb810fd14bc443b9c6b61e06de71832, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1732141451740 2024-11-20T22:24:15,031 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c81fc03693645a5a2252a4f5f8a55cf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1732141454056 2024-11-20T22:24:15,031 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 818c3e944dee48cb8f2eaa92b8628fc4, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1732141453955 2024-11-20T22:24:15,040 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 328fe2cc950802b391cb9dd7043a44b7#B#compaction#174 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:15,040 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/dd1c0d2994304746915b085adc1af111 is 50, key is test_row_0/B:col10/1732141454069/Put/seqid=0 2024-11-20T22:24:15,041 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:15,043 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120ba0ad76ffcd44a378ace27d25acb4603_328fe2cc950802b391cb9dd7043a44b7 store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:15,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742041_1217 (size=13153) 2024-11-20T22:24:15,069 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120ba0ad76ffcd44a378ace27d25acb4603_328fe2cc950802b391cb9dd7043a44b7, store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:15,069 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ba0ad76ffcd44a378ace27d25acb4603_328fe2cc950802b391cb9dd7043a44b7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:15,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742042_1218 (size=4469) 2024-11-20T22:24:15,098 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:15,099 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T22:24:15,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
2024-11-20T22:24:15,099 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing 328fe2cc950802b391cb9dd7043a44b7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:24:15,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=A 2024-11-20T22:24:15,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:15,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=B 2024-11-20T22:24:15,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:15,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=C 2024-11-20T22:24:15,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:15,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112068d9bf53cfb64478bea563573ca9730e_328fe2cc950802b391cb9dd7043a44b7 is 50, key is test_row_0/A:col10/1732141454177/Put/seqid=0 2024-11-20T22:24:15,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742043_1219 (size=12454) 2024-11-20T22:24:15,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T22:24:15,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:15,302 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
as already flushing 2024-11-20T22:24:15,302 DEBUG [Thread-590 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x15736fcc to 127.0.0.1:51822 2024-11-20T22:24:15,302 DEBUG [Thread-590 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:15,450 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/dd1c0d2994304746915b085adc1af111 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/dd1c0d2994304746915b085adc1af111 2024-11-20T22:24:15,456 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 328fe2cc950802b391cb9dd7043a44b7/B of 328fe2cc950802b391cb9dd7043a44b7 into dd1c0d2994304746915b085adc1af111(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:15,456 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:15,456 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., storeName=328fe2cc950802b391cb9dd7043a44b7/B, priority=13, startTime=1732141455029; duration=0sec 2024-11-20T22:24:15,456 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:15,456 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 328fe2cc950802b391cb9dd7043a44b7:B 2024-11-20T22:24:15,456 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:15,457 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:15,457 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 328fe2cc950802b391cb9dd7043a44b7/C is initiating minor compaction (all files) 2024-11-20T22:24:15,457 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 328fe2cc950802b391cb9dd7043a44b7/C in TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
2024-11-20T22:24:15,457 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/e03197e5a019458ba29541f4ddb0bb3f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/ce17b97556c04d3cadc0badf93da54a7, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/22804ae344ce42efba55a903b6117ba4] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp, totalSize=36.8 K 2024-11-20T22:24:15,457 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting e03197e5a019458ba29541f4ddb0bb3f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732141451055 2024-11-20T22:24:15,458 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting ce17b97556c04d3cadc0badf93da54a7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1732141451740 2024-11-20T22:24:15,458 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 22804ae344ce42efba55a903b6117ba4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1732141454056 2024-11-20T22:24:15,466 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 328fe2cc950802b391cb9dd7043a44b7#C#compaction#177 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:15,467 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/729573e259d54812a0db0dba4643573e is 50, key is test_row_0/C:col10/1732141454069/Put/seqid=0 2024-11-20T22:24:15,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742044_1220 (size=13153) 2024-11-20T22:24:15,474 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 328fe2cc950802b391cb9dd7043a44b7#A#compaction#175 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:15,475 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/451cdc9d616c4dbab377d30e639230d0 is 175, key is test_row_0/A:col10/1732141454069/Put/seqid=0 2024-11-20T22:24:15,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742045_1221 (size=32107) 2024-11-20T22:24:15,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:15,529 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112068d9bf53cfb64478bea563573ca9730e_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112068d9bf53cfb64478bea563573ca9730e_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:15,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/d7ed4aa714d64d21a1a4ac331d7ed490, store: [table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:15,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/d7ed4aa714d64d21a1a4ac331d7ed490 is 175, key is test_row_0/A:col10/1732141454177/Put/seqid=0 2024-11-20T22:24:15,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742046_1222 (size=31255) 2024-11-20T22:24:15,877 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/729573e259d54812a0db0dba4643573e as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/729573e259d54812a0db0dba4643573e 2024-11-20T22:24:15,883 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 328fe2cc950802b391cb9dd7043a44b7/C of 328fe2cc950802b391cb9dd7043a44b7 into 729573e259d54812a0db0dba4643573e(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:15,883 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:15,883 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., storeName=328fe2cc950802b391cb9dd7043a44b7/C, priority=13, startTime=1732141455029; duration=0sec 2024-11-20T22:24:15,883 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:15,883 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 328fe2cc950802b391cb9dd7043a44b7:C 2024-11-20T22:24:15,885 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/451cdc9d616c4dbab377d30e639230d0 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/451cdc9d616c4dbab377d30e639230d0 2024-11-20T22:24:15,894 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 328fe2cc950802b391cb9dd7043a44b7/A of 328fe2cc950802b391cb9dd7043a44b7 into 451cdc9d616c4dbab377d30e639230d0(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:15,894 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:15,894 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7., storeName=328fe2cc950802b391cb9dd7043a44b7/A, priority=13, startTime=1732141455029; duration=0sec 2024-11-20T22:24:15,894 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:15,894 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 328fe2cc950802b391cb9dd7043a44b7:A 2024-11-20T22:24:15,945 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=389, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/d7ed4aa714d64d21a1a4ac331d7ed490 2024-11-20T22:24:15,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/7da1981b945b45c2ab1693471c90a75d is 50, key is test_row_0/B:col10/1732141454177/Put/seqid=0 2024-11-20T22:24:15,958 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742047_1223 (size=12301) 2024-11-20T22:24:16,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T22:24:16,358 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=389 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/7da1981b945b45c2ab1693471c90a75d 2024-11-20T22:24:16,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/a4f0dafb35b94f8fa2330e4ff8099192 is 50, key is test_row_0/C:col10/1732141454177/Put/seqid=0 2024-11-20T22:24:16,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742048_1224 (size=12301) 2024-11-20T22:24:16,770 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=389 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/a4f0dafb35b94f8fa2330e4ff8099192 2024-11-20T22:24:16,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/d7ed4aa714d64d21a1a4ac331d7ed490 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d7ed4aa714d64d21a1a4ac331d7ed490 2024-11-20T22:24:16,784 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d7ed4aa714d64d21a1a4ac331d7ed490, entries=150, sequenceid=389, filesize=30.5 K 2024-11-20T22:24:16,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/7da1981b945b45c2ab1693471c90a75d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/7da1981b945b45c2ab1693471c90a75d 2024-11-20T22:24:16,789 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/7da1981b945b45c2ab1693471c90a75d, entries=150, sequenceid=389, filesize=12.0 K 2024-11-20T22:24:16,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/a4f0dafb35b94f8fa2330e4ff8099192 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/a4f0dafb35b94f8fa2330e4ff8099192 2024-11-20T22:24:16,793 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/a4f0dafb35b94f8fa2330e4ff8099192, entries=150, sequenceid=389, filesize=12.0 K 2024-11-20T22:24:16,794 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=6.71 KB/6870 for 328fe2cc950802b391cb9dd7043a44b7 in 1695ms, sequenceid=389, compaction requested=false 2024-11-20T22:24:16,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:16,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:16,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-11-20T22:24:16,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-11-20T22:24:16,796 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-20T22:24:16,796 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7740 sec 2024-11-20T22:24:16,797 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 2.7780 sec 2024-11-20T22:24:18,086 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-20T22:24:18,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T22:24:18,131 INFO [Thread-596 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-11-20T22:24:21,381 DEBUG [Thread-592 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x32168855 to 127.0.0.1:51822 2024-11-20T22:24:21,381 DEBUG [Thread-592 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:23,335 DEBUG [Thread-588 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53af6163 to 127.0.0.1:51822 2024-11-20T22:24:23,335 DEBUG [Thread-588 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:23,378 DEBUG [Thread-586 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2aa409d0 to 127.0.0.1:51822 2024-11-20T22:24:23,378 DEBUG [Thread-586 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:23,394 DEBUG [Thread-594 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x40832d66 to 127.0.0.1:51822 2024-11-20T22:24:23,395 DEBUG [Thread-594 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:23,395 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-20T22:24:23,395 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 21 2024-11-20T22:24:23,395 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 37 2024-11-20T22:24:23,395 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 161 2024-11-20T22:24:23,395 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 71 2024-11-20T22:24:23,395 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 21 2024-11-20T22:24:23,395 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T22:24:23,395 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2621 2024-11-20T22:24:23,395 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2604 2024-11-20T22:24:23,395 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T22:24:23,395 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1169 2024-11-20T22:24:23,395 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3507 rows 2024-11-20T22:24:23,395 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1150 2024-11-20T22:24:23,395 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3450 rows 2024-11-20T22:24:23,395 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T22:24:23,395 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3242ee55 to 127.0.0.1:51822 2024-11-20T22:24:23,395 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:23,402 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T22:24:23,403 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T22:24:23,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:23,406 
DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141463406"}]},"ts":"1732141463406"} 2024-11-20T22:24:23,408 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T22:24:23,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T22:24:23,419 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T22:24:23,420 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T22:24:23,424 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=328fe2cc950802b391cb9dd7043a44b7, UNASSIGN}] 2024-11-20T22:24:23,425 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=328fe2cc950802b391cb9dd7043a44b7, UNASSIGN 2024-11-20T22:24:23,428 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=328fe2cc950802b391cb9dd7043a44b7, regionState=CLOSING, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:23,430 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T22:24:23,430 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; CloseRegionProcedure 328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950}] 2024-11-20T22:24:23,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T22:24:23,582 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:23,583 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(124): Close 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:23,583 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T22:24:23,583 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1681): Closing 328fe2cc950802b391cb9dd7043a44b7, disabling compactions & flushes 2024-11-20T22:24:23,583 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:23,583 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
2024-11-20T22:24:23,583 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. after waiting 0 ms 2024-11-20T22:24:23,583 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 2024-11-20T22:24:23,584 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(2837): Flushing 328fe2cc950802b391cb9dd7043a44b7 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T22:24:23,584 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=A 2024-11-20T22:24:23,584 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:23,584 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=B 2024-11-20T22:24:23,584 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:23,584 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 328fe2cc950802b391cb9dd7043a44b7, store=C 2024-11-20T22:24:23,584 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:23,600 DEBUG [master/6365a1e51efd:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 451355caa9251e00fdfd2d0a5e7a8871 changed from -1.0 to 0.0, refreshing cache 2024-11-20T22:24:23,630 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112087e078c03bcd4f8897ec72341274d3f9_328fe2cc950802b391cb9dd7043a44b7 is 50, key is test_row_0/A:col10/1732141463393/Put/seqid=0 2024-11-20T22:24:23,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742049_1225 (size=9914) 2024-11-20T22:24:23,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T22:24:24,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T22:24:24,092 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:24,112 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] 
regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112087e078c03bcd4f8897ec72341274d3f9_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112087e078c03bcd4f8897ec72341274d3f9_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:24,122 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/997fe82919ff4c908857318cb976998d, store: [table=TestAcidGuarantees family=A region=328fe2cc950802b391cb9dd7043a44b7] 2024-11-20T22:24:24,123 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/997fe82919ff4c908857318cb976998d is 175, key is test_row_0/A:col10/1732141463393/Put/seqid=0 2024-11-20T22:24:24,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742050_1226 (size=22561) 2024-11-20T22:24:24,165 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=400, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/997fe82919ff4c908857318cb976998d 2024-11-20T22:24:24,190 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/97406e62d3f443fb984851aebbc0d49c is 50, key is test_row_0/B:col10/1732141463393/Put/seqid=0 2024-11-20T22:24:24,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742051_1227 (size=9857) 2024-11-20T22:24:24,232 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=400 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/97406e62d3f443fb984851aebbc0d49c 2024-11-20T22:24:24,250 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/7ca3b6c06ce143e3a97e4a569420ff2f is 50, key is test_row_0/C:col10/1732141463393/Put/seqid=0 2024-11-20T22:24:24,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34119 is added to blk_1073742052_1228 (size=9857) 2024-11-20T22:24:24,299 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=400 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/7ca3b6c06ce143e3a97e4a569420ff2f 2024-11-20T22:24:24,341 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/A/997fe82919ff4c908857318cb976998d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/997fe82919ff4c908857318cb976998d 2024-11-20T22:24:24,348 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/997fe82919ff4c908857318cb976998d, entries=100, sequenceid=400, filesize=22.0 K 2024-11-20T22:24:24,349 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/B/97406e62d3f443fb984851aebbc0d49c as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/97406e62d3f443fb984851aebbc0d49c 2024-11-20T22:24:24,356 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/97406e62d3f443fb984851aebbc0d49c, entries=100, sequenceid=400, filesize=9.6 K 2024-11-20T22:24:24,357 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/.tmp/C/7ca3b6c06ce143e3a97e4a569420ff2f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/7ca3b6c06ce143e3a97e4a569420ff2f 2024-11-20T22:24:24,363 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/7ca3b6c06ce143e3a97e4a569420ff2f, entries=100, sequenceid=400, filesize=9.6 K 2024-11-20T22:24:24,366 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 328fe2cc950802b391cb9dd7043a44b7 in 
783ms, sequenceid=400, compaction requested=true 2024-11-20T22:24:24,382 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/8b86505a3ed342979bc81cd786172a1e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/a9d2f0ec98b04394ab5cb077a918c92a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/82301e7580f943b0a372da152341d34b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/8526e2281bf84975be38065040f37de0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/f8a69baae4d74b0cbe2334e2af76ccfc, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/22030bc21d6643869411078cbc27ffa5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/100c3d6e913c4c33ab6c851bc3ded418, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/fa95887b1c6f4e9b9e214f2b899389b0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/1091d292d56740478d0de0d81aebeeff, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d4aa23f7841345a1afbb021ef5f0b0f4, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/272d1c466f254691b11c9869cfcaa6b1, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/215095e572ae429a9e5e88e82b077d4d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/27c30ae5df0543c8b6b1d4dcdadf971d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/6a28d226e77c4c1498f2fe6d1e5372f7, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/eb856bc4ce7449bbaaa4a09e1647ec9f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/9705a940649d422a90c54b1acbe92f69, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/62e4fc90926941c6b7c78fa3b2e6f05b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d14f33e6ede6459a98505efde89405c5, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/78fbe6b3f2b64f649ed9a4881fd9f7bb, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d1142fa1075c4d8b9f4ee65bd29b173f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/771a15a1c280470eafa06918eb1403b0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/a883a888924a4b1fa55e773fc9a88533, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/b6c45626d3f142aa9292fe3a697d1f11, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/0eb810fd14bc443b9c6b61e06de71832, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/818c3e944dee48cb8f2eaa92b8628fc4] to archive 2024-11-20T22:24:24,384 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T22:24:24,387 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/8b86505a3ed342979bc81cd786172a1e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/8b86505a3ed342979bc81cd786172a1e 2024-11-20T22:24:24,389 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/a9d2f0ec98b04394ab5cb077a918c92a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/a9d2f0ec98b04394ab5cb077a918c92a 2024-11-20T22:24:24,391 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/82301e7580f943b0a372da152341d34b to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/82301e7580f943b0a372da152341d34b 2024-11-20T22:24:24,393 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/8526e2281bf84975be38065040f37de0 to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/8526e2281bf84975be38065040f37de0 2024-11-20T22:24:24,395 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/f8a69baae4d74b0cbe2334e2af76ccfc to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/f8a69baae4d74b0cbe2334e2af76ccfc 2024-11-20T22:24:24,396 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/22030bc21d6643869411078cbc27ffa5 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/22030bc21d6643869411078cbc27ffa5 2024-11-20T22:24:24,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/100c3d6e913c4c33ab6c851bc3ded418 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/100c3d6e913c4c33ab6c851bc3ded418 2024-11-20T22:24:24,400 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/fa95887b1c6f4e9b9e214f2b899389b0 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/fa95887b1c6f4e9b9e214f2b899389b0 2024-11-20T22:24:24,402 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/1091d292d56740478d0de0d81aebeeff to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/1091d292d56740478d0de0d81aebeeff 2024-11-20T22:24:24,404 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d4aa23f7841345a1afbb021ef5f0b0f4 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d4aa23f7841345a1afbb021ef5f0b0f4 2024-11-20T22:24:24,405 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/272d1c466f254691b11c9869cfcaa6b1 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/272d1c466f254691b11c9869cfcaa6b1 2024-11-20T22:24:24,408 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/215095e572ae429a9e5e88e82b077d4d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/215095e572ae429a9e5e88e82b077d4d 2024-11-20T22:24:24,410 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/27c30ae5df0543c8b6b1d4dcdadf971d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/27c30ae5df0543c8b6b1d4dcdadf971d 2024-11-20T22:24:24,412 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/6a28d226e77c4c1498f2fe6d1e5372f7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/6a28d226e77c4c1498f2fe6d1e5372f7 2024-11-20T22:24:24,414 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/eb856bc4ce7449bbaaa4a09e1647ec9f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/eb856bc4ce7449bbaaa4a09e1647ec9f 2024-11-20T22:24:24,416 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/9705a940649d422a90c54b1acbe92f69 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/9705a940649d422a90c54b1acbe92f69 2024-11-20T22:24:24,418 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/62e4fc90926941c6b7c78fa3b2e6f05b to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/62e4fc90926941c6b7c78fa3b2e6f05b 2024-11-20T22:24:24,419 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d14f33e6ede6459a98505efde89405c5 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d14f33e6ede6459a98505efde89405c5 2024-11-20T22:24:24,421 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/78fbe6b3f2b64f649ed9a4881fd9f7bb to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/78fbe6b3f2b64f649ed9a4881fd9f7bb 2024-11-20T22:24:24,423 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d1142fa1075c4d8b9f4ee65bd29b173f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d1142fa1075c4d8b9f4ee65bd29b173f 2024-11-20T22:24:24,425 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/771a15a1c280470eafa06918eb1403b0 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/771a15a1c280470eafa06918eb1403b0 2024-11-20T22:24:24,427 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/a883a888924a4b1fa55e773fc9a88533 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/a883a888924a4b1fa55e773fc9a88533 2024-11-20T22:24:24,428 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/b6c45626d3f142aa9292fe3a697d1f11 to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/b6c45626d3f142aa9292fe3a697d1f11 2024-11-20T22:24:24,430 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/0eb810fd14bc443b9c6b61e06de71832 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/0eb810fd14bc443b9c6b61e06de71832 2024-11-20T22:24:24,438 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/818c3e944dee48cb8f2eaa92b8628fc4 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/818c3e944dee48cb8f2eaa92b8628fc4 2024-11-20T22:24:24,448 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/08b43a97f99241baba323920fc484273, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/ae49ea93baba4b248688f960b642b947, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/ee4e2b5c702143578206564a404cded6, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/c06311a76c7547a6a518c6f23679f144, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/f4dc4371d6b64b62b5813a27771e4597, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/f4fc9f6dcfab4e2cb47e5f86b30ca181, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/760f00717979453d9d55ac19fa092034, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/26eb7e13e5494c27a96ce8ecea9e6bac, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/43c8ddc18d934846863c583c65d492c7, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/77d572865dc1447285b4f775d77e9275, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/716586d81bd541429cb72479682bf453, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/2c67d8ce7e8948e4995517da8f4c7959, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/56f8fbb3ea824892b806d20eb82508f3, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/a227d2a0b1404b7f92562c06266b8665, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/022be98f04844cd892e0718ac553831a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/49b8d6e94d8544cab1d06b2ea729dc61, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/a88e9b77b57f4fcf8530ac73c5269ad4, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/c4f11816b0024aaaa79d06b8346c252d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/00b46b9194894ccc8f00b711d16fc1c6, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/f10803312a994b8c9136aad32bd1cbe8, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/43e1289798844d9e9a518cd48c3c657b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/965ab00916b64a2e91f4ade8d5d5f8db, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/3a2effc828954764895ff39fdf0558aa, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/8b41ea37415d41748fe3d1e71365fd1d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/5c81fc03693645a5a2252a4f5f8a55cf] to archive 2024-11-20T22:24:24,449 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
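The StoreCloser entries above show every compacted HFile of column family A being moved into the parallel archive/data/... directory tree rather than deleted, and the same archiving is about to run for families B and C below. A small sketch for listing what ends up in one of those archive directories, using the plain Hadoop FileSystem API with the NameNode address and paths copied from this log (the test's mini-cluster is ephemeral, so this is illustrative only and not part of the test):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedStoreFiles {
  public static void main(String[] args) throws Exception {
    // NameNode address and archive path are copied from the log entries above.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41121"), new Configuration());
    Path archiveDir = new Path("/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a"
        + "/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A");
    for (FileStatus f : fs.listStatus(archiveDir)) {
      System.out.printf("%s (%d bytes)%n", f.getPath().getName(), f.getLen());
    }
  }
}
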
2024-11-20T22:24:24,451 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/08b43a97f99241baba323920fc484273 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/08b43a97f99241baba323920fc484273 2024-11-20T22:24:24,453 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/ae49ea93baba4b248688f960b642b947 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/ae49ea93baba4b248688f960b642b947 2024-11-20T22:24:24,455 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/ee4e2b5c702143578206564a404cded6 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/ee4e2b5c702143578206564a404cded6 2024-11-20T22:24:24,457 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/c06311a76c7547a6a518c6f23679f144 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/c06311a76c7547a6a518c6f23679f144 2024-11-20T22:24:24,459 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/f4dc4371d6b64b62b5813a27771e4597 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/f4dc4371d6b64b62b5813a27771e4597 2024-11-20T22:24:24,461 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/f4fc9f6dcfab4e2cb47e5f86b30ca181 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/f4fc9f6dcfab4e2cb47e5f86b30ca181 2024-11-20T22:24:24,462 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/760f00717979453d9d55ac19fa092034 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/760f00717979453d9d55ac19fa092034 2024-11-20T22:24:24,464 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/26eb7e13e5494c27a96ce8ecea9e6bac to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/26eb7e13e5494c27a96ce8ecea9e6bac 2024-11-20T22:24:24,466 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/43c8ddc18d934846863c583c65d492c7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/43c8ddc18d934846863c583c65d492c7 2024-11-20T22:24:24,467 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/77d572865dc1447285b4f775d77e9275 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/77d572865dc1447285b4f775d77e9275 2024-11-20T22:24:24,471 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/716586d81bd541429cb72479682bf453 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/716586d81bd541429cb72479682bf453 2024-11-20T22:24:24,473 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/2c67d8ce7e8948e4995517da8f4c7959 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/2c67d8ce7e8948e4995517da8f4c7959 2024-11-20T22:24:24,475 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/56f8fbb3ea824892b806d20eb82508f3 to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/56f8fbb3ea824892b806d20eb82508f3 2024-11-20T22:24:24,477 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/a227d2a0b1404b7f92562c06266b8665 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/a227d2a0b1404b7f92562c06266b8665 2024-11-20T22:24:24,479 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/022be98f04844cd892e0718ac553831a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/022be98f04844cd892e0718ac553831a 2024-11-20T22:24:24,481 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/49b8d6e94d8544cab1d06b2ea729dc61 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/49b8d6e94d8544cab1d06b2ea729dc61 2024-11-20T22:24:24,482 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/a88e9b77b57f4fcf8530ac73c5269ad4 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/a88e9b77b57f4fcf8530ac73c5269ad4 2024-11-20T22:24:24,484 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/c4f11816b0024aaaa79d06b8346c252d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/c4f11816b0024aaaa79d06b8346c252d 2024-11-20T22:24:24,486 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/00b46b9194894ccc8f00b711d16fc1c6 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/00b46b9194894ccc8f00b711d16fc1c6 2024-11-20T22:24:24,488 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/f10803312a994b8c9136aad32bd1cbe8 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/f10803312a994b8c9136aad32bd1cbe8 2024-11-20T22:24:24,490 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/43e1289798844d9e9a518cd48c3c657b to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/43e1289798844d9e9a518cd48c3c657b 2024-11-20T22:24:24,492 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/965ab00916b64a2e91f4ade8d5d5f8db to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/965ab00916b64a2e91f4ade8d5d5f8db 2024-11-20T22:24:24,495 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/3a2effc828954764895ff39fdf0558aa to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/3a2effc828954764895ff39fdf0558aa 2024-11-20T22:24:24,497 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/8b41ea37415d41748fe3d1e71365fd1d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/8b41ea37415d41748fe3d1e71365fd1d 2024-11-20T22:24:24,501 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/5c81fc03693645a5a2252a4f5f8a55cf to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/5c81fc03693645a5a2252a4f5f8a55cf 2024-11-20T22:24:24,517 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/4f7bf5807c974acc8984d86935e5fd53, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/804a240d580248589fc9453f005b879f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/4e82da334502494ba04f340e037c697a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/7dd2dd43aa7d4e2fa98248d925dd3171, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/1a2ed107dc9c45e59f23a2023fa6d007, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/9bbe160310f948eebbaab8e5bc5ca8b0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/243b9853092b4bc58c5648b9a5c8cec8, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/cb6694a05da1479bb0028fb9916f5620, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/c9f9d175ad0d4924b49d030245e8c352, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/408169e10bd84091bec341b6918bed48, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/7b1e1a9416174f10bf2919aaeeaf9643, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/74caf10f13d944d7b1aa698c473d3d41, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/87126edc615d4d70ad0cd4c400931696, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/631de4c1d12844a79a95a14813afee23, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/83e06f7901a44a2fae91d7ca83a21f0e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/79977a90708f49dfb3bd70caa2dd48b6, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/b26a9d979165460081f4c70fbf1c85f1, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/0201b03ebc844a6dbda74275e2272442, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/d37774bf6881493cbaaf1e139b7fa1e7, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/4576c64870e74a838bc4c557c35ddb6e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/05b7b93bd86e457f857dcd0f7ac290cb, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/e03197e5a019458ba29541f4ddb0bb3f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/0ee5862b859341d0b89b7c9f070ea173, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/ce17b97556c04d3cadc0badf93da54a7, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/22804ae344ce42efba55a903b6117ba4] to archive 2024-11-20T22:24:24,518 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T22:24:24,521 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/4f7bf5807c974acc8984d86935e5fd53 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/4f7bf5807c974acc8984d86935e5fd53 2024-11-20T22:24:24,522 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/804a240d580248589fc9453f005b879f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/804a240d580248589fc9453f005b879f 2024-11-20T22:24:24,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T22:24:24,524 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/4e82da334502494ba04f340e037c697a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/4e82da334502494ba04f340e037c697a 2024-11-20T22:24:24,526 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/7dd2dd43aa7d4e2fa98248d925dd3171 to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/7dd2dd43aa7d4e2fa98248d925dd3171 2024-11-20T22:24:24,528 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/1a2ed107dc9c45e59f23a2023fa6d007 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/1a2ed107dc9c45e59f23a2023fa6d007 2024-11-20T22:24:24,529 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/9bbe160310f948eebbaab8e5bc5ca8b0 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/9bbe160310f948eebbaab8e5bc5ca8b0 2024-11-20T22:24:24,534 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/243b9853092b4bc58c5648b9a5c8cec8 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/243b9853092b4bc58c5648b9a5c8cec8 2024-11-20T22:24:24,536 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/cb6694a05da1479bb0028fb9916f5620 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/cb6694a05da1479bb0028fb9916f5620 2024-11-20T22:24:24,539 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/c9f9d175ad0d4924b49d030245e8c352 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/c9f9d175ad0d4924b49d030245e8c352 2024-11-20T22:24:24,541 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/408169e10bd84091bec341b6918bed48 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/408169e10bd84091bec341b6918bed48 2024-11-20T22:24:24,542 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/7b1e1a9416174f10bf2919aaeeaf9643 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/7b1e1a9416174f10bf2919aaeeaf9643 2024-11-20T22:24:24,544 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/74caf10f13d944d7b1aa698c473d3d41 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/74caf10f13d944d7b1aa698c473d3d41 2024-11-20T22:24:24,546 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/87126edc615d4d70ad0cd4c400931696 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/87126edc615d4d70ad0cd4c400931696 2024-11-20T22:24:24,548 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/631de4c1d12844a79a95a14813afee23 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/631de4c1d12844a79a95a14813afee23 2024-11-20T22:24:24,563 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/83e06f7901a44a2fae91d7ca83a21f0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/83e06f7901a44a2fae91d7ca83a21f0e 2024-11-20T22:24:24,566 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/79977a90708f49dfb3bd70caa2dd48b6 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/79977a90708f49dfb3bd70caa2dd48b6 2024-11-20T22:24:24,575 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/b26a9d979165460081f4c70fbf1c85f1 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/b26a9d979165460081f4c70fbf1c85f1 2024-11-20T22:24:24,581 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/0201b03ebc844a6dbda74275e2272442 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/0201b03ebc844a6dbda74275e2272442 2024-11-20T22:24:24,585 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/d37774bf6881493cbaaf1e139b7fa1e7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/d37774bf6881493cbaaf1e139b7fa1e7 2024-11-20T22:24:24,595 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/4576c64870e74a838bc4c557c35ddb6e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/4576c64870e74a838bc4c557c35ddb6e 2024-11-20T22:24:24,597 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/05b7b93bd86e457f857dcd0f7ac290cb to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/05b7b93bd86e457f857dcd0f7ac290cb 2024-11-20T22:24:24,599 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/e03197e5a019458ba29541f4ddb0bb3f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/e03197e5a019458ba29541f4ddb0bb3f 2024-11-20T22:24:24,601 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/0ee5862b859341d0b89b7c9f070ea173 to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/0ee5862b859341d0b89b7c9f070ea173 2024-11-20T22:24:24,604 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/ce17b97556c04d3cadc0badf93da54a7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/ce17b97556c04d3cadc0badf93da54a7 2024-11-20T22:24:24,606 DEBUG [StoreCloser-TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/22804ae344ce42efba55a903b6117ba4 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/22804ae344ce42efba55a903b6117ba4 2024-11-20T22:24:24,646 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/recovered.edits/403.seqid, newMaxSeqId=403, maxSeqId=4 2024-11-20T22:24:24,647 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7. 
2024-11-20T22:24:24,647 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1635): Region close journal for 328fe2cc950802b391cb9dd7043a44b7: 2024-11-20T22:24:24,650 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(170): Closed 328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:24,651 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=328fe2cc950802b391cb9dd7043a44b7, regionState=CLOSED 2024-11-20T22:24:24,672 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-11-20T22:24:24,672 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; CloseRegionProcedure 328fe2cc950802b391cb9dd7043a44b7, server=6365a1e51efd,44631,1732141399950 in 1.2400 sec 2024-11-20T22:24:24,674 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-11-20T22:24:24,674 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=328fe2cc950802b391cb9dd7043a44b7, UNASSIGN in 1.2480 sec 2024-11-20T22:24:24,683 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-11-20T22:24:24,683 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.2600 sec 2024-11-20T22:24:24,685 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141464684"}]},"ts":"1732141464684"} 2024-11-20T22:24:24,688 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T22:24:24,763 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T22:24:24,765 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.3610 sec 2024-11-20T22:24:25,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T22:24:25,524 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-11-20T22:24:25,524 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T22:24:25,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:25,526 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:25,527 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=67, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:25,528 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T22:24:25,535 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,546 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A, FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B, FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C, FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/recovered.edits] 2024-11-20T22:24:25,562 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/451cdc9d616c4dbab377d30e639230d0 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/451cdc9d616c4dbab377d30e639230d0 2024-11-20T22:24:25,568 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/997fe82919ff4c908857318cb976998d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/997fe82919ff4c908857318cb976998d 2024-11-20T22:24:25,580 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d7ed4aa714d64d21a1a4ac331d7ed490 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/A/d7ed4aa714d64d21a1a4ac331d7ed490 2024-11-20T22:24:25,588 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/7da1981b945b45c2ab1693471c90a75d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/7da1981b945b45c2ab1693471c90a75d 2024-11-20T22:24:25,592 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/97406e62d3f443fb984851aebbc0d49c to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/97406e62d3f443fb984851aebbc0d49c 
2024-11-20T22:24:25,595 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/dd1c0d2994304746915b085adc1af111 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/B/dd1c0d2994304746915b085adc1af111 2024-11-20T22:24:25,615 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/729573e259d54812a0db0dba4643573e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/729573e259d54812a0db0dba4643573e 2024-11-20T22:24:25,617 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/7ca3b6c06ce143e3a97e4a569420ff2f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/7ca3b6c06ce143e3a97e4a569420ff2f 2024-11-20T22:24:25,620 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/a4f0dafb35b94f8fa2330e4ff8099192 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/C/a4f0dafb35b94f8fa2330e4ff8099192 2024-11-20T22:24:25,633 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/recovered.edits/403.seqid to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7/recovered.edits/403.seqid 2024-11-20T22:24:25,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T22:24:25,643 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,643 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T22:24:25,647 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T22:24:25,648 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T22:24:25,658 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120017ead8f706c4e118a3a15c4efe3993c_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120017ead8f706c4e118a3a15c4efe3993c_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,665 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200e727923923d499491c223102bf7a42c_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200e727923923d499491c223102bf7a42c_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,667 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201187f22096234d8881f565232230630c_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201187f22096234d8881f565232230630c_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,669 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201f8bc1bc35c448b98f0c5b69436cf060_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201f8bc1bc35c448b98f0c5b69436cf060_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,676 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112025e07fbe50e14b09b792eb6967209265_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112025e07fbe50e14b09b792eb6967209265_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,681 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202638d8f65daf489c96f6f2a477b7d5bc_328fe2cc950802b391cb9dd7043a44b7 to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202638d8f65daf489c96f6f2a477b7d5bc_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,694 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203e9654e6e17a4e41bbe494f68bae88ab_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203e9654e6e17a4e41bbe494f68bae88ab_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,697 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112046dcb017240a4d0ea490094e7520854f_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112046dcb017240a4d0ea490094e7520854f_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,700 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112048efb5daa3d94aa7b00e27a5174011f7_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112048efb5daa3d94aa7b00e27a5174011f7_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,703 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205052c5dafc604a238d79403ba6c6294d_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205052c5dafc604a238d79403ba6c6294d_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,705 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112056c40b9b687249098a404d1997127fa9_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112056c40b9b687249098a404d1997127fa9_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,707 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112068d9bf53cfb64478bea563573ca9730e_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112068d9bf53cfb64478bea563573ca9730e_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,709 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207b32a557721744139983f580681e81c2_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207b32a557721744139983f580681e81c2_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,710 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112087e078c03bcd4f8897ec72341274d3f9_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112087e078c03bcd4f8897ec72341274d3f9_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,712 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208c3b182cfd054848a2598735e5f8256b_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208c3b182cfd054848a2598735e5f8256b_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,715 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120924110d847a342d9958379053c428b62_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120924110d847a342d9958379053c428b62_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,718 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120be89ff415acc4783a27eb651f5185afb_328fe2cc950802b391cb9dd7043a44b7 to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120be89ff415acc4783a27eb651f5185afb_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,720 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d6ea4bc6d86d4825811ff4e60293bd22_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d6ea4bc6d86d4825811ff4e60293bd22_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,722 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120da3132a356df4876a877d056017afafd_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120da3132a356df4876a877d056017afafd_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,724 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f535fa2e8c0143b8a7d3188f3c48e652_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f535fa2e8c0143b8a7d3188f3c48e652_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,726 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f8747207ad064ef38a055e01a9ba1800_328fe2cc950802b391cb9dd7043a44b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f8747207ad064ef38a055e01a9ba1800_328fe2cc950802b391cb9dd7043a44b7 2024-11-20T22:24:25,728 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T22:24:25,734 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=67, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:25,741 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T22:24:25,747 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
2024-11-20T22:24:25,749 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=67, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:25,749 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T22:24:25,750 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732141465749"}]},"ts":"9223372036854775807"} 2024-11-20T22:24:25,758 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T22:24:25,759 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 328fe2cc950802b391cb9dd7043a44b7, NAME => 'TestAcidGuarantees,,1732141431607.328fe2cc950802b391cb9dd7043a44b7.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T22:24:25,759 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T22:24:25,759 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732141465759"}]},"ts":"9223372036854775807"} 2024-11-20T22:24:25,762 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T22:24:25,811 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=67, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:25,812 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 286 msec 2024-11-20T22:24:25,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T22:24:25,835 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-11-20T22:24:25,854 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=237 (was 240), OpenFileDescriptor=451 (was 458), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1058 (was 814) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2356 (was 2361) 2024-11-20T22:24:25,884 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=237, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=1058, ProcessCount=11, AvailableMemoryMB=2353 2024-11-20T22:24:25,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-20T22:24:25,886 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T22:24:25,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:25,889 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T22:24:25,889 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:25,890 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T22:24:25,891 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 68 2024-11-20T22:24:25,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-20T22:24:25,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742053_1229 (size=960) 2024-11-20T22:24:25,963 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a 2024-11-20T22:24:25,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-20T22:24:26,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742054_1230 (size=53) 2024-11-20T22:24:26,028 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:24:26,028 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 1ae43dfbfefd3b112decface0ed50cc2, disabling compactions & flushes 2024-11-20T22:24:26,028 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:26,028 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:26,028 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. after waiting 0 ms 2024-11-20T22:24:26,028 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:26,028 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:26,028 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:26,030 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T22:24:26,030 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732141466030"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732141466030"}]},"ts":"1732141466030"} 2024-11-20T22:24:26,031 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-11-20T22:24:26,033 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T22:24:26,033 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141466033"}]},"ts":"1732141466033"} 2024-11-20T22:24:26,034 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T22:24:26,063 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1ae43dfbfefd3b112decface0ed50cc2, ASSIGN}] 2024-11-20T22:24:26,065 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1ae43dfbfefd3b112decface0ed50cc2, ASSIGN 2024-11-20T22:24:26,066 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=1ae43dfbfefd3b112decface0ed50cc2, ASSIGN; state=OFFLINE, location=6365a1e51efd,44631,1732141399950; forceNewPlan=false, retain=false 2024-11-20T22:24:26,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-20T22:24:26,227 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=1ae43dfbfefd3b112decface0ed50cc2, regionState=OPENING, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:26,228 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; OpenRegionProcedure 1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950}] 2024-11-20T22:24:26,381 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:26,384 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:26,384 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7285): Opening region: {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:24:26,385 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:26,385 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:24:26,385 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7327): checking encryption for 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:26,385 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7330): checking classloading for 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:26,386 INFO [StoreOpener-1ae43dfbfefd3b112decface0ed50cc2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:26,388 INFO [StoreOpener-1ae43dfbfefd3b112decface0ed50cc2-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:24:26,388 INFO [StoreOpener-1ae43dfbfefd3b112decface0ed50cc2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1ae43dfbfefd3b112decface0ed50cc2 columnFamilyName A 2024-11-20T22:24:26,388 DEBUG [StoreOpener-1ae43dfbfefd3b112decface0ed50cc2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:26,389 INFO [StoreOpener-1ae43dfbfefd3b112decface0ed50cc2-1 {}] regionserver.HStore(327): Store=1ae43dfbfefd3b112decface0ed50cc2/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:24:26,389 INFO [StoreOpener-1ae43dfbfefd3b112decface0ed50cc2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:26,390 INFO [StoreOpener-1ae43dfbfefd3b112decface0ed50cc2-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:24:26,390 INFO [StoreOpener-1ae43dfbfefd3b112decface0ed50cc2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1ae43dfbfefd3b112decface0ed50cc2 columnFamilyName B 2024-11-20T22:24:26,390 DEBUG [StoreOpener-1ae43dfbfefd3b112decface0ed50cc2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:26,391 INFO [StoreOpener-1ae43dfbfefd3b112decface0ed50cc2-1 {}] regionserver.HStore(327): Store=1ae43dfbfefd3b112decface0ed50cc2/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:24:26,391 INFO [StoreOpener-1ae43dfbfefd3b112decface0ed50cc2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:26,393 INFO [StoreOpener-1ae43dfbfefd3b112decface0ed50cc2-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:24:26,393 INFO [StoreOpener-1ae43dfbfefd3b112decface0ed50cc2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1ae43dfbfefd3b112decface0ed50cc2 columnFamilyName C 2024-11-20T22:24:26,393 DEBUG [StoreOpener-1ae43dfbfefd3b112decface0ed50cc2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:26,394 INFO [StoreOpener-1ae43dfbfefd3b112decface0ed50cc2-1 {}] regionserver.HStore(327): Store=1ae43dfbfefd3b112decface0ed50cc2/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:24:26,394 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:26,395 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:26,395 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:26,397 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T22:24:26,398 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1085): writing seq id for 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:26,400 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T22:24:26,401 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1102): Opened 1ae43dfbfefd3b112decface0ed50cc2; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67828042, jitterRate=0.01071658730506897}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T22:24:26,401 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1001): Region open journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:26,402 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., pid=70, masterSystemTime=1732141466381 2024-11-20T22:24:26,404 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:26,404 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:26,404 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=1ae43dfbfefd3b112decface0ed50cc2, regionState=OPEN, openSeqNum=2, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:26,407 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-20T22:24:26,407 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; OpenRegionProcedure 1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 in 177 msec 2024-11-20T22:24:26,409 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-11-20T22:24:26,409 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=1ae43dfbfefd3b112decface0ed50cc2, ASSIGN in 344 msec 2024-11-20T22:24:26,410 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T22:24:26,410 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141466410"}]},"ts":"1732141466410"} 2024-11-20T22:24:26,411 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T22:24:26,456 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T22:24:26,457 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 570 msec 2024-11-20T22:24:26,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-20T22:24:26,501 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 68 completed 2024-11-20T22:24:26,503 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x00faa31c to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@688f4c53 2024-11-20T22:24:26,526 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3eec6530, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:26,528 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:26,530 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39720, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:26,534 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T22:24:26,535 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51584, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T22:24:26,538 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22a568ce to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@305a451d 2024-11-20T22:24:26,563 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dc273c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:26,565 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x05e0e280 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@67f02d8c 2024-11-20T22:24:26,599 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@195206da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:26,603 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x473477dd to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@21cebefa 2024-11-20T22:24:26,620 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@282318cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:26,622 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1a91dc80 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4e7c8846 2024-11-20T22:24:26,682 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ea91426, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:26,684 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6a874cc0 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4093d76e 2024-11-20T22:24:26,731 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@762de37e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:26,733 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x31178bc2 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2838b88d 2024-11-20T22:24:26,760 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@124edab0, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:26,762 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x78439bc6 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@15d2a893 2024-11-20T22:24:26,783 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@712d7bc3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:26,785 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5a2545d0 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5ab3f837 2024-11-20T22:24:26,812 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40da73c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:26,813 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6d039dc2 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2834a215 2024-11-20T22:24:26,848 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3be398a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:26,849 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x15db087a to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@187234de 2024-11-20T22:24:26,881 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ff3c1a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:26,891 DEBUG [hconnection-0x627ffc69-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:26,892 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39722, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:26,898 DEBUG [hconnection-0x54570118-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:26,898 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:26,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 
2024-11-20T22:24:26,900 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39734, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:26,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T22:24:26,977 DEBUG [hconnection-0xeaef610-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:26,978 DEBUG [hconnection-0x4a1dde49-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:26,979 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39742, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:26,979 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:26,979 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39752, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:26,979 DEBUG [hconnection-0x108109bf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:26,980 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39756, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:26,983 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:26,983 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:27,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T22:24:27,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:27,007 DEBUG [hconnection-0x786575f7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:27,008 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1ae43dfbfefd3b112decface0ed50cc2 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:24:27,009 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39760, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:27,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=A 2024-11-20T22:24:27,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:27,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO 
DISK 1ae43dfbfefd3b112decface0ed50cc2, store=B 2024-11-20T22:24:27,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:27,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=C 2024-11-20T22:24:27,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:27,015 DEBUG [hconnection-0x6a043d24-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:27,019 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39770, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:27,023 DEBUG [hconnection-0x25cfb477-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:27,024 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39782, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:27,032 DEBUG [hconnection-0x752d985f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:27,034 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39792, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:27,044 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141527039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141527043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141527043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141527044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,054 DEBUG [hconnection-0x3cd2356b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:27,055 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39794, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:27,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141527061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,124 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/0ddb15b6d47341c088abd296a3ff57dc is 50, key is test_row_0/A:col10/1732141467007/Put/seqid=0 2024-11-20T22:24:27,138 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,143 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T22:24:27,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:27,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:27,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:27,143 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141527147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,166 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141527155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,170 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141527159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141527159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742055_1231 (size=12001) 2024-11-20T22:24:27,184 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141527173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T22:24:27,296 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,296 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T22:24:27,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:27,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:27,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:27,297 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:27,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141527359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141527373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141527375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141527375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141527388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,451 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,463 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T22:24:27,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:27,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:27,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:27,464 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:27,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T22:24:27,598 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/0ddb15b6d47341c088abd296a3ff57dc 2024-11-20T22:24:27,621 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,621 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T22:24:27,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:27,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:27,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:27,622 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:27,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,651 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/f9bca11bc79d4c6586ca722c5b75dbd4 is 50, key is test_row_0/B:col10/1732141467007/Put/seqid=0 2024-11-20T22:24:27,672 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141527669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141527683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,686 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141527683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,688 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141527683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141527695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742056_1232 (size=12001) 2024-11-20T22:24:27,775 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,776 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T22:24:27,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:27,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:27,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:27,776 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:27,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,928 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:27,929 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T22:24:27,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:27,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:27,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:27,929 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:28,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T22:24:28,085 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:28,086 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T22:24:28,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:28,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:28,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:28,086 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:28,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:28,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:28,102 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/f9bca11bc79d4c6586ca722c5b75dbd4 2024-11-20T22:24:28,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141528178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:28,180 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/83f6c1b5eace43abb193677c801a3f56 is 50, key is test_row_0/C:col10/1732141467007/Put/seqid=0 2024-11-20T22:24:28,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141528190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:28,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141528195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:28,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141528195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:28,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141528204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:28,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742057_1233 (size=12001) 2024-11-20T22:24:28,238 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:28,239 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T22:24:28,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:28,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:28,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:28,239 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:28,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:28,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:28,393 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:28,393 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T22:24:28,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:28,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:28,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:28,393 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:28,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:28,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:28,545 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:28,546 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T22:24:28,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:28,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:28,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:28,547 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:28,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:28,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
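The records above trace FlushRegionProcedure pid=72 (a subprocedure of FlushTableProcedure pid=71 on table TestAcidGuarantees) being dispatched to region server 6365a1e51efd,44631,1732141399950 and rejected with "Unable to complete flush ... as already flushing", because the MemStoreFlusher is still running its own flush of region 1ae43dfbfefd3b112decface0ed50cc2. The master logs "Remote procedure failed, pid=72" and re-dispatches the callable until it succeeds; the successful completion of pid=72 (sequenceid=38) appears further down. The sketch below is a minimal, illustrative example of how a table flush like the one traced here would typically be requested from client code with the standard HBase Admin API; the class name is made up for illustration, and the configuration is assumed to point at the cluster whose log this is.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        // Assumes hbase-site.xml on the classpath points at the cluster in this log.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Requests a flush of every region of the table. On this build (2.7.0-SNAPSHOT)
          // the master runs it through the procedure framework, which is what the
          // surrounding records show: FlushTableProcedure pid=71 with a
          // FlushRegionProcedure subprocedure pid=72 per region.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

With this kind of request, a memstore flush that is already in progress only delays the procedure rather than failing it permanently: the region server reports the IOException back, and the master keeps re-dispatching pid=72 until the in-flight flush finishes, which is the retry pattern visible in the records before and after this point.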
2024-11-20T22:24:28,620 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/83f6c1b5eace43abb193677c801a3f56 2024-11-20T22:24:28,626 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/0ddb15b6d47341c088abd296a3ff57dc as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/0ddb15b6d47341c088abd296a3ff57dc 2024-11-20T22:24:28,635 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/0ddb15b6d47341c088abd296a3ff57dc, entries=150, sequenceid=14, filesize=11.7 K 2024-11-20T22:24:28,636 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/f9bca11bc79d4c6586ca722c5b75dbd4 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/f9bca11bc79d4c6586ca722c5b75dbd4 2024-11-20T22:24:28,644 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/f9bca11bc79d4c6586ca722c5b75dbd4, entries=150, sequenceid=14, filesize=11.7 K 2024-11-20T22:24:28,646 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/83f6c1b5eace43abb193677c801a3f56 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/83f6c1b5eace43abb193677c801a3f56 2024-11-20T22:24:28,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/83f6c1b5eace43abb193677c801a3f56, entries=150, sequenceid=14, filesize=11.7 K 2024-11-20T22:24:28,655 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 1ae43dfbfefd3b112decface0ed50cc2 in 1647ms, sequenceid=14, compaction requested=false 2024-11-20T22:24:28,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:28,698 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:28,700 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=72 2024-11-20T22:24:28,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:28,701 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 1ae43dfbfefd3b112decface0ed50cc2 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T22:24:28,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=A 2024-11-20T22:24:28,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:28,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=B 2024-11-20T22:24:28,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:28,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=C 2024-11-20T22:24:28,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:28,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/55106ddf21f147cc9bb9fc2d165fdb4c is 50, key is test_row_0/A:col10/1732141467046/Put/seqid=0 2024-11-20T22:24:28,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742058_1234 (size=12001) 2024-11-20T22:24:29,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T22:24:29,184 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/55106ddf21f147cc9bb9fc2d165fdb4c 2024-11-20T22:24:29,198 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
as already flushing 2024-11-20T22:24:29,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:29,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/623514d9bf6f4305bf1d16a5836d7b3e is 50, key is test_row_0/B:col10/1732141467046/Put/seqid=0 2024-11-20T22:24:29,249 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141529230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,253 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141529240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141529252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742059_1235 (size=12001) 2024-11-20T22:24:29,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141529253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141529253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,270 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/623514d9bf6f4305bf1d16a5836d7b3e 2024-11-20T22:24:29,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/9c25b9943a7646f99669eb8a65c2fbed is 50, key is test_row_0/C:col10/1732141467046/Put/seqid=0 2024-11-20T22:24:29,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742060_1236 (size=12001) 2024-11-20T22:24:29,349 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/9c25b9943a7646f99669eb8a65c2fbed 2024-11-20T22:24:29,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141529354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/55106ddf21f147cc9bb9fc2d165fdb4c as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/55106ddf21f147cc9bb9fc2d165fdb4c 2024-11-20T22:24:29,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141529363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141529362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141529364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,378 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141529371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,387 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/55106ddf21f147cc9bb9fc2d165fdb4c, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T22:24:29,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/623514d9bf6f4305bf1d16a5836d7b3e as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/623514d9bf6f4305bf1d16a5836d7b3e 2024-11-20T22:24:29,403 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/623514d9bf6f4305bf1d16a5836d7b3e, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T22:24:29,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/9c25b9943a7646f99669eb8a65c2fbed as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/9c25b9943a7646f99669eb8a65c2fbed 2024-11-20T22:24:29,415 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/9c25b9943a7646f99669eb8a65c2fbed, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T22:24:29,416 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 1ae43dfbfefd3b112decface0ed50cc2 in 715ms, sequenceid=38, 
compaction requested=false 2024-11-20T22:24:29,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:29,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:29,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-20T22:24:29,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-11-20T22:24:29,421 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-20T22:24:29,421 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4370 sec 2024-11-20T22:24:29,423 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 2.5240 sec 2024-11-20T22:24:29,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:29,565 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1ae43dfbfefd3b112decface0ed50cc2 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T22:24:29,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=A 2024-11-20T22:24:29,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:29,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=B 2024-11-20T22:24:29,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:29,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=C 2024-11-20T22:24:29,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:29,578 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/96c352d0948c4a51b607cc7650d46e0e is 50, key is test_row_0/A:col10/1732141469238/Put/seqid=0 2024-11-20T22:24:29,631 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141529619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,632 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141529623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141529625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141529627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141529632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742061_1237 (size=14341) 2024-11-20T22:24:29,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/96c352d0948c4a51b607cc7650d46e0e 2024-11-20T22:24:29,704 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/c39d965c15d340f381bd087ffb2c161e is 50, key is test_row_0/B:col10/1732141469238/Put/seqid=0 2024-11-20T22:24:29,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742062_1238 (size=12001) 2024-11-20T22:24:29,728 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/c39d965c15d340f381bd087ffb2c161e 2024-11-20T22:24:29,742 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141529734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141529741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,749 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/e2386bdcfc3e40d697592e3b7946c8a3 is 50, key is test_row_0/C:col10/1732141469238/Put/seqid=0 2024-11-20T22:24:29,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141529742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,752 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141529746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141529751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742063_1239 (size=12001) 2024-11-20T22:24:29,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141529944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141529949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,962 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141529955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141529957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:29,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141529974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:30,187 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/e2386bdcfc3e40d697592e3b7946c8a3 2024-11-20T22:24:30,201 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/96c352d0948c4a51b607cc7650d46e0e as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/96c352d0948c4a51b607cc7650d46e0e 2024-11-20T22:24:30,203 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T22:24:30,209 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/96c352d0948c4a51b607cc7650d46e0e, entries=200, sequenceid=51, filesize=14.0 K 2024-11-20T22:24:30,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/c39d965c15d340f381bd087ffb2c161e as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/c39d965c15d340f381bd087ffb2c161e 2024-11-20T22:24:30,253 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/c39d965c15d340f381bd087ffb2c161e, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T22:24:30,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141530252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:30,256 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/e2386bdcfc3e40d697592e3b7946c8a3 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/e2386bdcfc3e40d697592e3b7946c8a3 2024-11-20T22:24:30,266 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/e2386bdcfc3e40d697592e3b7946c8a3, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T22:24:30,268 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 1ae43dfbfefd3b112decface0ed50cc2 in 702ms, sequenceid=51, compaction requested=true 2024-11-20T22:24:30,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:30,268 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:30,268 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:30,268 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:30,268 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:30,268 DEBUG 
[RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:30,268 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:30,268 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:30,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:30,283 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:30,283 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/A is initiating minor compaction (all files) 2024-11-20T22:24:30,283 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/A in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:30,283 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/0ddb15b6d47341c088abd296a3ff57dc, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/55106ddf21f147cc9bb9fc2d165fdb4c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/96c352d0948c4a51b607cc7650d46e0e] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=37.4 K 2024-11-20T22:24:30,284 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:30,284 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/B is initiating minor compaction (all files) 2024-11-20T22:24:30,284 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/B in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:30,284 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/f9bca11bc79d4c6586ca722c5b75dbd4, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/623514d9bf6f4305bf1d16a5836d7b3e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/c39d965c15d340f381bd087ffb2c161e] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=35.2 K 2024-11-20T22:24:30,285 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting f9bca11bc79d4c6586ca722c5b75dbd4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732141467001 2024-11-20T22:24:30,285 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ddb15b6d47341c088abd296a3ff57dc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732141467001 2024-11-20T22:24:30,286 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 623514d9bf6f4305bf1d16a5836d7b3e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732141467032 2024-11-20T22:24:30,286 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55106ddf21f147cc9bb9fc2d165fdb4c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732141467032 2024-11-20T22:24:30,287 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting c39d965c15d340f381bd087ffb2c161e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732141469208 2024-11-20T22:24:30,287 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96c352d0948c4a51b607cc7650d46e0e, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732141469208 2024-11-20T22:24:30,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:30,299 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1ae43dfbfefd3b112decface0ed50cc2 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:24:30,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=A 2024-11-20T22:24:30,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:30,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=B 2024-11-20T22:24:30,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:30,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=C 
2024-11-20T22:24:30,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:30,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141530310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:30,326 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#A#compaction#192 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:30,326 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#B#compaction#193 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:30,326 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/a079e6617a1146ca8b15c6f6f93a1e3d is 50, key is test_row_0/A:col10/1732141469238/Put/seqid=0 2024-11-20T22:24:30,327 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/3765cec605ba402895eb212045a72b8e is 50, key is test_row_0/B:col10/1732141469238/Put/seqid=0 2024-11-20T22:24:30,332 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/1dd5bd04440a4d5d868e3da06039766a is 50, key is test_row_0/A:col10/1732141469624/Put/seqid=0 2024-11-20T22:24:30,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141530319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:30,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141530322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:30,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141530323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:30,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742064_1240 (size=12104) 2024-11-20T22:24:30,418 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/3765cec605ba402895eb212045a72b8e as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/3765cec605ba402895eb212045a72b8e 2024-11-20T22:24:30,426 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/B of 1ae43dfbfefd3b112decface0ed50cc2 into 3765cec605ba402895eb212045a72b8e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:30,426 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:30,426 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/B, priority=13, startTime=1732141470268; duration=0sec 2024-11-20T22:24:30,430 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:30,430 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:B 2024-11-20T22:24:30,430 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:30,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141530424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:30,432 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:30,432 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/C is initiating minor compaction (all files) 2024-11-20T22:24:30,433 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/C in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:30,433 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/83f6c1b5eace43abb193677c801a3f56, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/9c25b9943a7646f99669eb8a65c2fbed, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/e2386bdcfc3e40d697592e3b7946c8a3] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=35.2 K 2024-11-20T22:24:30,434 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 83f6c1b5eace43abb193677c801a3f56, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732141467001 2024-11-20T22:24:30,434 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c25b9943a7646f99669eb8a65c2fbed, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732141467032 2024-11-20T22:24:30,435 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting e2386bdcfc3e40d697592e3b7946c8a3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732141469208 2024-11-20T22:24:30,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742065_1241 (size=12104) 2024-11-20T22:24:30,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141530437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:30,456 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/a079e6617a1146ca8b15c6f6f93a1e3d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/a079e6617a1146ca8b15c6f6f93a1e3d 2024-11-20T22:24:30,460 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141530437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:30,465 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/A of 1ae43dfbfefd3b112decface0ed50cc2 into a079e6617a1146ca8b15c6f6f93a1e3d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:30,465 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:30,465 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/A, priority=13, startTime=1732141470268; duration=0sec 2024-11-20T22:24:30,465 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:30,465 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:A 2024-11-20T22:24:30,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141530447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:30,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742066_1242 (size=12001) 2024-11-20T22:24:30,491 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/1dd5bd04440a4d5d868e3da06039766a 2024-11-20T22:24:30,501 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#C#compaction#195 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:30,502 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/16a6a034a67f49668a438e14de202893 is 50, key is test_row_0/C:col10/1732141469238/Put/seqid=0 2024-11-20T22:24:30,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742067_1243 (size=12104) 2024-11-20T22:24:30,556 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/32fe3af0c6614c3cbf67f8c4e4c68cff is 50, key is test_row_0/B:col10/1732141469624/Put/seqid=0 2024-11-20T22:24:30,579 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/16a6a034a67f49668a438e14de202893 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/16a6a034a67f49668a438e14de202893 2024-11-20T22:24:30,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742068_1244 (size=12001) 2024-11-20T22:24:30,599 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 
1ae43dfbfefd3b112decface0ed50cc2/C of 1ae43dfbfefd3b112decface0ed50cc2 into 16a6a034a67f49668a438e14de202893(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:30,599 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:30,599 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/C, priority=13, startTime=1732141470268; duration=0sec 2024-11-20T22:24:30,600 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:30,600 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:C 2024-11-20T22:24:30,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141530633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:30,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141530660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:30,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141530662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:30,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141530679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:30,757 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141530755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:30,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141530942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:30,968 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141530967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:30,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141530989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:30,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141530989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:30,995 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/32fe3af0c6614c3cbf67f8c4e4c68cff 2024-11-20T22:24:31,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T22:24:31,011 INFO [Thread-1082 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-20T22:24:31,023 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:31,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-20T22:24:31,031 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:31,031 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:31,031 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:31,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T22:24:31,052 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/a25aba0e1bcc4938954431e2fe903500 is 50, key is test_row_0/C:col10/1732141469624/Put/seqid=0 2024-11-20T22:24:31,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742069_1245 (size=12001) 2024-11-20T22:24:31,122 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/a25aba0e1bcc4938954431e2fe903500 2024-11-20T22:24:31,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/1dd5bd04440a4d5d868e3da06039766a as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1dd5bd04440a4d5d868e3da06039766a 2024-11-20T22:24:31,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T22:24:31,145 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1dd5bd04440a4d5d868e3da06039766a, entries=150, sequenceid=77, filesize=11.7 K 2024-11-20T22:24:31,149 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/32fe3af0c6614c3cbf67f8c4e4c68cff as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/32fe3af0c6614c3cbf67f8c4e4c68cff 2024-11-20T22:24:31,157 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/32fe3af0c6614c3cbf67f8c4e4c68cff, entries=150, sequenceid=77, filesize=11.7 K 2024-11-20T22:24:31,160 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/a25aba0e1bcc4938954431e2fe903500 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/a25aba0e1bcc4938954431e2fe903500 2024-11-20T22:24:31,179 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/a25aba0e1bcc4938954431e2fe903500, entries=150, sequenceid=77, filesize=11.7 K 2024-11-20T22:24:31,187 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 1ae43dfbfefd3b112decface0ed50cc2 in 887ms, sequenceid=77, compaction requested=false 2024-11-20T22:24:31,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:31,203 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:31,203 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T22:24:31,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:31,204 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 1ae43dfbfefd3b112decface0ed50cc2 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T22:24:31,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=A 2024-11-20T22:24:31,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:31,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=B 2024-11-20T22:24:31,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:31,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=C 2024-11-20T22:24:31,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:31,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/e7e05530614b49c3bd50d4dbfe1b22d8 is 50, key is test_row_0/A:col10/1732141470308/Put/seqid=0 2024-11-20T22:24:31,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742070_1246 (size=12001) 2024-11-20T22:24:31,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is 
done pid=73 2024-11-20T22:24:31,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:31,466 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:31,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:31,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:31,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141531576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:31,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:31,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141531583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:31,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141531576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:31,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:31,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141531583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:31,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T22:24:31,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:31,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141531692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:31,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:31,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141531695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:31,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:31,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141531695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:31,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:31,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141531699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:31,715 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/e7e05530614b49c3bd50d4dbfe1b22d8 2024-11-20T22:24:31,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/65168271972f4df1b2f2fd6c55b4b899 is 50, key is test_row_0/B:col10/1732141470308/Put/seqid=0 2024-11-20T22:24:31,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:31,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141531767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:31,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742071_1247 (size=12001) 2024-11-20T22:24:31,820 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/65168271972f4df1b2f2fd6c55b4b899 2024-11-20T22:24:31,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/5e7f49a5e0de4a6a87c9efff8c99c249 is 50, key is test_row_0/C:col10/1732141470308/Put/seqid=0 2024-11-20T22:24:31,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:31,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141531899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:31,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:31,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141531900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:31,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:31,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141531902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:31,918 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:31,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141531916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:31,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742072_1248 (size=12001) 2024-11-20T22:24:32,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T22:24:32,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141532213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:32,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141532213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:32,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141532213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:32,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141532222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:32,330 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/5e7f49a5e0de4a6a87c9efff8c99c249 2024-11-20T22:24:32,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/e7e05530614b49c3bd50d4dbfe1b22d8 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/e7e05530614b49c3bd50d4dbfe1b22d8 2024-11-20T22:24:32,384 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/e7e05530614b49c3bd50d4dbfe1b22d8, entries=150, sequenceid=90, filesize=11.7 K 2024-11-20T22:24:32,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/65168271972f4df1b2f2fd6c55b4b899 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/65168271972f4df1b2f2fd6c55b4b899 2024-11-20T22:24:32,415 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/65168271972f4df1b2f2fd6c55b4b899, entries=150, sequenceid=90, filesize=11.7 K 2024-11-20T22:24:32,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/5e7f49a5e0de4a6a87c9efff8c99c249 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/5e7f49a5e0de4a6a87c9efff8c99c249 2024-11-20T22:24:32,435 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/5e7f49a5e0de4a6a87c9efff8c99c249, entries=150, sequenceid=90, filesize=11.7 K 2024-11-20T22:24:32,443 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for 1ae43dfbfefd3b112decface0ed50cc2 in 1240ms, sequenceid=90, compaction requested=true 2024-11-20T22:24:32,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:32,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:32,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-11-20T22:24:32,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-11-20T22:24:32,453 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-20T22:24:32,453 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4180 sec 2024-11-20T22:24:32,455 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.4310 sec 2024-11-20T22:24:32,732 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1ae43dfbfefd3b112decface0ed50cc2 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-20T22:24:32,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=A 2024-11-20T22:24:32,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:32,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=B 2024-11-20T22:24:32,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:32,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=C 2024-11-20T22:24:32,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
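
The entries above trace one complete flush cycle for region 1ae43dfbfefd3b112decface0ed50cc2: the master stores FlushTableProcedure pid=73, fans it out to a FlushRegionProcedure subprocedure pid=74, the region server flushes stores A, B and C to .tmp files and commits them (entries=150, sequenceid=90), and the master marks both procedures SUCCESS. As a reference point, the following is a minimal sketch of how a client requests such a flush through the standard HBase 2.x Admin API; the table name is taken from the log, while the class name and configuration source are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Picks up hbase-site.xml from the classpath; connection details are assumed.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Synchronous flush request: on the master this becomes a FlushTableProcedure
      // with one FlushRegionProcedure per region, as seen in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

Admin.flush(TableName) waits for the master-side procedure to finish, which lines up with the earlier "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed" entry reported by HBaseAdmin$TableFuture.
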
2024-11-20T22:24:32,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:32,769 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/b571feffea6a4576a67e28d408af0eb9 is 50, key is test_row_0/A:col10/1732141471539/Put/seqid=0 2024-11-20T22:24:32,798 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141532763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:32,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141532776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:32,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141532797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:32,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141532807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:32,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742073_1249 (size=14341) 2024-11-20T22:24:32,867 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/b571feffea6a4576a67e28d408af0eb9 2024-11-20T22:24:32,921 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141532908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:32,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141532908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:32,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141532927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:32,940 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/1a1c4548a5814eb6858649fc77aa61f6 is 50, key is test_row_0/B:col10/1732141471539/Put/seqid=0 2024-11-20T22:24:32,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141532939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:33,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742074_1250 (size=12001) 2024-11-20T22:24:33,128 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141533124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:33,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141533128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:33,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T22:24:33,141 INFO [Thread-1082 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-20T22:24:33,154 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:33,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141533151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:33,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-20T22:24:33,165 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:33,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T22:24:33,172 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:33,172 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:33,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141533167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:33,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T22:24:33,330 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:33,331 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T22:24:33,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:33,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:33,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:33,332 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
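The FLUSH operations above (procId 73 completed, then pid=75/76 started) are administrative table flushes; pid=76 fails on the region server with "Unable to complete flush ... as already flushing" because the MemStoreFlusher is still writing out the same region, and the master later re-dispatches it. A minimal sketch of how such a flush is requested from the client side, assuming an open Connection; only the table name comes from the log, the wrapper class and method are illustrative.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public final class FlushTableSketch {
  // Asks the master to flush every region of the table; on the master this runs
  // as a FlushTableProcedure that fans out one FlushRegionProcedure per region.
  static void flushTestTable(Connection conn) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}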
2024-11-20T22:24:33,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:33,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:33,423 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/1a1c4548a5814eb6858649fc77aa61f6 2024-11-20T22:24:33,446 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/67203502698f41e18aab95acf3967314 is 50, key is test_row_0/C:col10/1732141471539/Put/seqid=0 2024-11-20T22:24:33,450 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141533443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:33,450 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141533443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:33,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T22:24:33,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141533475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:33,483 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:33,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141533479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:33,491 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T22:24:33,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:33,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:33,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:33,491 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:33,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:33,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
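Each rejected Mutate in this stretch is an ordinary put against the test table, whose rows span the three column families A, B and C (see the test_row_0/A:col10, B:col10 and C:col10 keys in the flush output). A minimal sketch of such a write, assuming an open Connection; the value bytes and the wrapper class are illustrative only.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class MutateSketch {
  // Writes one row across the table's three column families A, B and C; this is
  // the kind of Mutate call the handlers above reject with RegionTooBusyException
  // while the region's memstore is over its blocking limit.
  static void writeRow(Connection conn) throws IOException {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      table.put(put);
    }
  }
}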
2024-11-20T22:24:33,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742075_1251 (size=12001) 2024-11-20T22:24:33,504 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/67203502698f41e18aab95acf3967314 2024-11-20T22:24:33,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/b571feffea6a4576a67e28d408af0eb9 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/b571feffea6a4576a67e28d408af0eb9 2024-11-20T22:24:33,528 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/b571feffea6a4576a67e28d408af0eb9, entries=200, sequenceid=118, filesize=14.0 K 2024-11-20T22:24:33,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/1a1c4548a5814eb6858649fc77aa61f6 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/1a1c4548a5814eb6858649fc77aa61f6 2024-11-20T22:24:33,535 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/1a1c4548a5814eb6858649fc77aa61f6, entries=150, sequenceid=118, filesize=11.7 K 2024-11-20T22:24:33,543 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/67203502698f41e18aab95acf3967314 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/67203502698f41e18aab95acf3967314 2024-11-20T22:24:33,558 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/67203502698f41e18aab95acf3967314, entries=150, sequenceid=118, filesize=11.7 K 2024-11-20T22:24:33,563 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 1ae43dfbfefd3b112decface0ed50cc2 in 831ms, sequenceid=118, compaction requested=true 2024-11-20T22:24:33,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:33,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:A, priority=-2147483648, current under compaction 
store size is 1 2024-11-20T22:24:33,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:33,563 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:33,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:33,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:33,563 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:33,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:33,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:33,568 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:33,568 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/B is initiating minor compaction (all files) 2024-11-20T22:24:33,568 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/B in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:33,568 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/3765cec605ba402895eb212045a72b8e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/32fe3af0c6614c3cbf67f8c4e4c68cff, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/65168271972f4df1b2f2fd6c55b4b899, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/1a1c4548a5814eb6858649fc77aa61f6] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=47.0 K 2024-11-20T22:24:33,569 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 3765cec605ba402895eb212045a72b8e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732141469208 2024-11-20T22:24:33,569 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50447 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:33,570 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/A is initiating minor compaction (all files) 2024-11-20T22:24:33,570 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/A in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:33,570 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/a079e6617a1146ca8b15c6f6f93a1e3d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1dd5bd04440a4d5d868e3da06039766a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/e7e05530614b49c3bd50d4dbfe1b22d8, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/b571feffea6a4576a67e28d408af0eb9] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=49.3 K 2024-11-20T22:24:33,570 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 32fe3af0c6614c3cbf67f8c4e4c68cff, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732141469624 2024-11-20T22:24:33,571 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting a079e6617a1146ca8b15c6f6f93a1e3d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732141469208 2024-11-20T22:24:33,571 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 65168271972f4df1b2f2fd6c55b4b899, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732141470308 2024-11-20T22:24:33,573 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1dd5bd04440a4d5d868e3da06039766a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732141469624 2024-11-20T22:24:33,574 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a1c4548a5814eb6858649fc77aa61f6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732141471539 2024-11-20T22:24:33,574 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting e7e05530614b49c3bd50d4dbfe1b22d8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732141470308 2024-11-20T22:24:33,575 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting b571feffea6a4576a67e28d408af0eb9, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732141471539 2024-11-20T22:24:33,608 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#B#compaction#204 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:33,608 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/64dcf2db4d0e4723a584da75969ed301 is 50, key is test_row_0/B:col10/1732141471539/Put/seqid=0 2024-11-20T22:24:33,617 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#A#compaction#205 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:33,618 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/13242446c34d4bfb926ae51570ca65e5 is 50, key is test_row_0/A:col10/1732141471539/Put/seqid=0 2024-11-20T22:24:33,643 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:33,644 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T22:24:33,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:33,647 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 1ae43dfbfefd3b112decface0ed50cc2 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-20T22:24:33,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=A 2024-11-20T22:24:33,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:33,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=B 2024-11-20T22:24:33,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:33,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=C 2024-11-20T22:24:33,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:33,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742076_1252 (size=12241) 2024-11-20T22:24:33,668 DEBUG 
[RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/64dcf2db4d0e4723a584da75969ed301 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/64dcf2db4d0e4723a584da75969ed301 2024-11-20T22:24:33,688 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/B of 1ae43dfbfefd3b112decface0ed50cc2 into 64dcf2db4d0e4723a584da75969ed301(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:33,688 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:33,688 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/B, priority=12, startTime=1732141473563; duration=0sec 2024-11-20T22:24:33,688 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:33,688 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:B 2024-11-20T22:24:33,688 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:33,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742077_1253 (size=12241) 2024-11-20T22:24:33,692 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:33,692 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/C is initiating minor compaction (all files) 2024-11-20T22:24:33,692 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/C in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:33,692 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/16a6a034a67f49668a438e14de202893, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/a25aba0e1bcc4938954431e2fe903500, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/5e7f49a5e0de4a6a87c9efff8c99c249, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/67203502698f41e18aab95acf3967314] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=47.0 K 2024-11-20T22:24:33,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/1379ffa48d1044aa85fcd2f834096987 is 50, key is test_row_0/A:col10/1732141472752/Put/seqid=0 2024-11-20T22:24:33,694 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 16a6a034a67f49668a438e14de202893, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732141469208 2024-11-20T22:24:33,697 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting a25aba0e1bcc4938954431e2fe903500, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732141469624 2024-11-20T22:24:33,699 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e7f49a5e0de4a6a87c9efff8c99c249, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732141470308 2024-11-20T22:24:33,699 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 67203502698f41e18aab95acf3967314, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732141471539 2024-11-20T22:24:33,700 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/13242446c34d4bfb926ae51570ca65e5 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/13242446c34d4bfb926ae51570ca65e5 2024-11-20T22:24:33,711 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/A of 1ae43dfbfefd3b112decface0ed50cc2 into 13242446c34d4bfb926ae51570ca65e5(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:33,711 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:33,711 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/A, priority=12, startTime=1732141473563; duration=0sec 2024-11-20T22:24:33,712 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:33,712 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:A 2024-11-20T22:24:33,808 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#C#compaction#207 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:33,809 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/da15edadbdc74415932d1478e4a34732 is 50, key is test_row_0/C:col10/1732141471539/Put/seqid=0 2024-11-20T22:24:33,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T22:24:33,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742078_1254 (size=12001) 2024-11-20T22:24:33,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:33,830 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:33,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742079_1255 (size=12241) 2024-11-20T22:24:33,889 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/da15edadbdc74415932d1478e4a34732 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/da15edadbdc74415932d1478e4a34732 2024-11-20T22:24:33,895 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/C of 1ae43dfbfefd3b112decface0ed50cc2 into da15edadbdc74415932d1478e4a34732(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:33,895 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:33,895 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/C, priority=12, startTime=1732141473563; duration=0sec 2024-11-20T22:24:33,895 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:33,895 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:C 2024-11-20T22:24:34,003 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141533995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141533996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141533997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141534011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141534012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141534127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141534129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,134 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141534130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,134 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141534130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,136 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141534132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,223 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/1379ffa48d1044aa85fcd2f834096987 2024-11-20T22:24:34,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/9511ff5918e84d448772ae995a45fc5c is 50, key is test_row_0/B:col10/1732141472752/Put/seqid=0 2024-11-20T22:24:34,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T22:24:34,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141534339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141534339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141534339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141534339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141534339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742080_1256 (size=12001) 2024-11-20T22:24:34,372 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/9511ff5918e84d448772ae995a45fc5c 2024-11-20T22:24:34,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/b5f11d593c1b4ee995faa6f7e244d74a is 50, key is test_row_0/C:col10/1732141472752/Put/seqid=0 2024-11-20T22:24:34,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742081_1257 (size=12001) 2024-11-20T22:24:34,464 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/b5f11d593c1b4ee995faa6f7e244d74a 2024-11-20T22:24:34,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/1379ffa48d1044aa85fcd2f834096987 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1379ffa48d1044aa85fcd2f834096987 2024-11-20T22:24:34,491 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1379ffa48d1044aa85fcd2f834096987, entries=150, sequenceid=127, filesize=11.7 K 2024-11-20T22:24:34,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/9511ff5918e84d448772ae995a45fc5c as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/9511ff5918e84d448772ae995a45fc5c 2024-11-20T22:24:34,502 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/9511ff5918e84d448772ae995a45fc5c, entries=150, sequenceid=127, filesize=11.7 K 2024-11-20T22:24:34,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/b5f11d593c1b4ee995faa6f7e244d74a as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/b5f11d593c1b4ee995faa6f7e244d74a 2024-11-20T22:24:34,514 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/b5f11d593c1b4ee995faa6f7e244d74a, entries=150, sequenceid=127, filesize=11.7 K 2024-11-20T22:24:34,516 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for 1ae43dfbfefd3b112decface0ed50cc2 in 868ms, sequenceid=127, compaction requested=false 2024-11-20T22:24:34,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:34,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:34,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-20T22:24:34,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-20T22:24:34,518 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-20T22:24:34,518 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3450 sec 2024-11-20T22:24:34,520 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.3650 sec 2024-11-20T22:24:34,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:34,650 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1ae43dfbfefd3b112decface0ed50cc2 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-20T22:24:34,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=A 2024-11-20T22:24:34,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:34,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=B 2024-11-20T22:24:34,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:34,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=C 2024-11-20T22:24:34,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:34,671 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/fd1cc53d20c74be3b07e704dd535abf6 is 50, key is test_row_0/A:col10/1732141474648/Put/seqid=0 2024-11-20T22:24:34,673 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141534663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,674 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141534664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141534667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141534667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141534668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742082_1258 (size=14541) 2024-11-20T22:24:34,730 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/fd1cc53d20c74be3b07e704dd535abf6 2024-11-20T22:24:34,755 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/2636b8331b09416da7c32dcb74968adc is 50, key is test_row_0/B:col10/1732141474648/Put/seqid=0 2024-11-20T22:24:34,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141534777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141534777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,786 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141534777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,786 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141534778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,791 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141534787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:34,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742083_1259 (size=12151) 2024-11-20T22:24:34,828 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/2636b8331b09416da7c32dcb74968adc 2024-11-20T22:24:34,864 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/bf81eb17af844f378aba787de5f269e6 is 50, key is test_row_0/C:col10/1732141474648/Put/seqid=0 2024-11-20T22:24:34,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742084_1260 (size=12151) 2024-11-20T22:24:34,956 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/bf81eb17af844f378aba787de5f269e6 2024-11-20T22:24:34,978 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/fd1cc53d20c74be3b07e704dd535abf6 as 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/fd1cc53d20c74be3b07e704dd535abf6 2024-11-20T22:24:34,983 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/fd1cc53d20c74be3b07e704dd535abf6, entries=200, sequenceid=159, filesize=14.2 K 2024-11-20T22:24:34,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/2636b8331b09416da7c32dcb74968adc as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/2636b8331b09416da7c32dcb74968adc 2024-11-20T22:24:34,990 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/2636b8331b09416da7c32dcb74968adc, entries=150, sequenceid=159, filesize=11.9 K 2024-11-20T22:24:34,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/bf81eb17af844f378aba787de5f269e6 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/bf81eb17af844f378aba787de5f269e6 2024-11-20T22:24:34,998 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/bf81eb17af844f378aba787de5f269e6, entries=150, sequenceid=159, filesize=11.9 K 2024-11-20T22:24:35,007 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for 1ae43dfbfefd3b112decface0ed50cc2 in 357ms, sequenceid=159, compaction requested=true 2024-11-20T22:24:35,007 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:35,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:35,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:35,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:35,008 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:35,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:35,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:35,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:24:35,009 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38783 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:35,009 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/A is initiating minor compaction (all files) 2024-11-20T22:24:35,009 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/A in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:35,009 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/13242446c34d4bfb926ae51570ca65e5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1379ffa48d1044aa85fcd2f834096987, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/fd1cc53d20c74be3b07e704dd535abf6] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=37.9 K 2024-11-20T22:24:35,010 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 13242446c34d4bfb926ae51570ca65e5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732141471539 2024-11-20T22:24:35,010 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 1379ffa48d1044aa85fcd2f834096987, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732141472745 2024-11-20T22:24:35,010 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:35,011 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting fd1cc53d20c74be3b07e704dd535abf6, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732141473977 2024-11-20T22:24:35,022 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#A#compaction#213 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:35,023 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:35,023 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/B is initiating minor compaction (all files) 2024-11-20T22:24:35,023 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/B in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:35,023 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/3358eda11ac34273894878aa1e6db6be is 50, key is test_row_0/A:col10/1732141474648/Put/seqid=0 2024-11-20T22:24:35,023 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/64dcf2db4d0e4723a584da75969ed301, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/9511ff5918e84d448772ae995a45fc5c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/2636b8331b09416da7c32dcb74968adc] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=35.5 K 2024-11-20T22:24:35,026 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 64dcf2db4d0e4723a584da75969ed301, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732141471539 2024-11-20T22:24:35,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:35,031 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1ae43dfbfefd3b112decface0ed50cc2 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:24:35,037 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9511ff5918e84d448772ae995a45fc5c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732141472745 2024-11-20T22:24:35,037 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2636b8331b09416da7c32dcb74968adc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732141473977 2024-11-20T22:24:35,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=A 2024-11-20T22:24:35,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:35,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
1ae43dfbfefd3b112decface0ed50cc2, store=B 2024-11-20T22:24:35,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:35,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=C 2024-11-20T22:24:35,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:35,061 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#B#compaction#214 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:35,061 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/44c98c57b3bc4c47b69cea1a6bba284e is 50, key is test_row_0/B:col10/1732141474648/Put/seqid=0 2024-11-20T22:24:35,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742085_1261 (size=12493) 2024-11-20T22:24:35,090 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/13b11c0fb3b24ebaba32ab8b7f01fcef is 50, key is test_row_0/A:col10/1732141475031/Put/seqid=0 2024-11-20T22:24:35,090 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/3358eda11ac34273894878aa1e6db6be as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/3358eda11ac34273894878aa1e6db6be 2024-11-20T22:24:35,097 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/A of 1ae43dfbfefd3b112decface0ed50cc2 into 3358eda11ac34273894878aa1e6db6be(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
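[editorial note] The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from HRegion.checkResources rejecting writes once the region's memstore exceeds its blocking size, which is typically derived as hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. A minimal sketch of that relationship follows; the 128 K flush size and multiplier of 4 are illustrative values chosen only because they reproduce the 512 K limit seen in this log, not values read from this test's configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static void main(String[] args) {
        // Illustrative values: 128 KB flush size * multiplier 4 = 512 KB blocking limit,
        // matching the "Over memstore limit=512.0 K" messages in the log above.
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier;

        System.out.printf("flush size=%d bytes, multiplier=%d, blocking limit=%d bytes (%.1f K)%n",
                flushSize, multiplier, blockingLimit, blockingLimit / 1024.0);
    }
}
```

Raising either setting widens the window before writers are blocked, at the cost of more memstore heap held per region before a flush is forced.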
2024-11-20T22:24:35,097 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:35,097 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/A, priority=13, startTime=1732141475008; duration=0sec 2024-11-20T22:24:35,097 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:35,097 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:A 2024-11-20T22:24:35,097 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:35,100 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:35,100 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/C is initiating minor compaction (all files) 2024-11-20T22:24:35,100 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/C in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:35,100 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/da15edadbdc74415932d1478e4a34732, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/b5f11d593c1b4ee995faa6f7e244d74a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/bf81eb17af844f378aba787de5f269e6] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=35.5 K 2024-11-20T22:24:35,101 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting da15edadbdc74415932d1478e4a34732, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732141471539 2024-11-20T22:24:35,101 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting b5f11d593c1b4ee995faa6f7e244d74a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732141472745 2024-11-20T22:24:35,102 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting bf81eb17af844f378aba787de5f269e6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732141473977 2024-11-20T22:24:35,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 
is added to blk_1073742086_1262 (size=12493) 2024-11-20T22:24:35,136 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141535126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141535127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141535130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,146 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/44c98c57b3bc4c47b69cea1a6bba284e as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/44c98c57b3bc4c47b69cea1a6bba284e 2024-11-20T22:24:35,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742087_1263 (size=14541) 2024-11-20T22:24:35,156 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/13b11c0fb3b24ebaba32ab8b7f01fcef 2024-11-20T22:24:35,165 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#C#compaction#216 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:35,165 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/2a1b76bd4abc49388605aeac2df11bad is 50, key is test_row_0/C:col10/1732141474648/Put/seqid=0 2024-11-20T22:24:35,167 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/B of 1ae43dfbfefd3b112decface0ed50cc2 into 44c98c57b3bc4c47b69cea1a6bba284e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
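[editorial note] Each rejected Mutate above surfaces to the caller as a RegionTooBusyException carrying a deadline; the HBase client normally retries these internally, but a caller running its own retry loop might look roughly like the sketch below. The table name, row, column, and backoff values mirror the patterns in this log (TestAcidGuarantees, test_row_0, A:col10) but are used here purely as assumptions, not taken from the test's source.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 50; // hypothetical starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException busy) {
                    // Region is over its memstore blocking limit; back off and let a flush drain it.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}
```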
2024-11-20T22:24:35,167 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:35,167 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/B, priority=13, startTime=1732141475008; duration=0sec 2024-11-20T22:24:35,167 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:35,167 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:B 2024-11-20T22:24:35,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141535137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141535137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,172 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/06f7ffb79daf474f85f91fdec4e045c0 is 50, key is test_row_0/B:col10/1732141475031/Put/seqid=0 2024-11-20T22:24:35,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742088_1264 (size=12493) 2024-11-20T22:24:35,219 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/2a1b76bd4abc49388605aeac2df11bad as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/2a1b76bd4abc49388605aeac2df11bad 2024-11-20T22:24:35,225 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/C of 1ae43dfbfefd3b112decface0ed50cc2 into 2a1b76bd4abc49388605aeac2df11bad(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
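[editorial note] The "Exploring compaction algorithm has selected 3 files" entries above reflect HBase's ratio-based minor-compaction selection: a group of store files is eligible when each file is no larger than the combined size of the others times a configurable ratio. The following is a deliberately simplified illustration of that ratio test, not the actual ExploringCompactionPolicy code; the file sizes only roughly mirror the three A-store files compacted above, and 1.2 is assumed as the usual hbase.hstore.compaction.ratio default.

```java
import java.util.Arrays;
import java.util.List;

public class CompactionRatioSketch {
    /** Simplified form of the ratio check used by HBase's size-based compaction policies. */
    static boolean withinRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        // Each file must be no bigger than (sum of the other files) * ratio.
        return fileSizes.stream().allMatch(size -> size <= (total - size) * ratio);
    }

    public static void main(String[] args) {
        // Rough stand-ins for the 12.0 K, 11.7 K and 14.2 K files selected in the log above.
        List<Long> storeFiles = Arrays.asList(12288L, 11980L, 14541L);
        double ratio = 1.2; // assumed hbase.hstore.compaction.ratio default
        System.out.println("eligible for minor compaction: " + withinRatio(storeFiles, ratio));
    }
}
```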
2024-11-20T22:24:35,225 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:35,226 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/C, priority=13, startTime=1732141475008; duration=0sec 2024-11-20T22:24:35,226 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:35,226 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:C 2024-11-20T22:24:35,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742089_1265 (size=12151) 2024-11-20T22:24:35,240 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/06f7ffb79daf474f85f91fdec4e045c0 2024-11-20T22:24:35,245 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141535240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,245 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141535240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141535240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,265 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/0e2705a549834475bf51a64c6e86640d is 50, key is test_row_0/C:col10/1732141475031/Put/seqid=0 2024-11-20T22:24:35,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141535269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141535269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T22:24:35,314 INFO [Thread-1082 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-20T22:24:35,316 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:35,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742090_1266 (size=12151) 2024-11-20T22:24:35,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-20T22:24:35,318 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:35,319 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:35,319 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:35,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=77 2024-11-20T22:24:35,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T22:24:35,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141535448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141535448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141535450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,479 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,479 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T22:24:35,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:35,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:35,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:35,480 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:35,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:35,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141535480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141535480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:35,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T22:24:35,644 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,647 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T22:24:35,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:35,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:35,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:35,648 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:35,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:35,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:35,721 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/0e2705a549834475bf51a64c6e86640d 2024-11-20T22:24:35,738 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/13b11c0fb3b24ebaba32ab8b7f01fcef as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/13b11c0fb3b24ebaba32ab8b7f01fcef 2024-11-20T22:24:35,744 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/13b11c0fb3b24ebaba32ab8b7f01fcef, entries=200, sequenceid=171, filesize=14.2 K 2024-11-20T22:24:35,745 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/06f7ffb79daf474f85f91fdec4e045c0 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/06f7ffb79daf474f85f91fdec4e045c0 2024-11-20T22:24:35,754 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/06f7ffb79daf474f85f91fdec4e045c0, entries=150, sequenceid=171, filesize=11.9 K 2024-11-20T22:24:35,756 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/0e2705a549834475bf51a64c6e86640d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/0e2705a549834475bf51a64c6e86640d 2024-11-20T22:24:35,761 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/0e2705a549834475bf51a64c6e86640d, entries=150, sequenceid=171, filesize=11.9 K 2024-11-20T22:24:35,762 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 1ae43dfbfefd3b112decface0ed50cc2 in 731ms, sequenceid=171, compaction requested=false 2024-11-20T22:24:35,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:35,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:35,767 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1ae43dfbfefd3b112decface0ed50cc2 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 
2024-11-20T22:24:35,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=A 2024-11-20T22:24:35,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:35,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=B 2024-11-20T22:24:35,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:35,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=C 2024-11-20T22:24:35,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:35,776 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/0a07d1359c144e61aa260b491521685e is 50, key is test_row_0/A:col10/1732141475765/Put/seqid=0 2024-11-20T22:24:35,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141535789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141535789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,801 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,801 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T22:24:35,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:35,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:35,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:35,801 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:35,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:35,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:35,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141535791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141535802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141535805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742091_1267 (size=14541) 2024-11-20T22:24:35,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141535899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141535900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141535905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141535913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141535913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T22:24:35,954 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:35,959 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T22:24:35,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:35,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:35,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:35,959 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:35,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:35,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:36,111 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:36,112 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T22:24:36,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:36,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:36,112 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:36,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:36,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141536108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:36,112 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:36,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:36,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:36,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:36,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:36,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141536119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:36,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141536119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:36,125 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:36,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141536119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:36,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:36,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141536119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:36,226 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/0a07d1359c144e61aa260b491521685e 2024-11-20T22:24:36,254 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/07d0206eec26492ea402976370c5c759 is 50, key is test_row_0/B:col10/1732141475765/Put/seqid=0 2024-11-20T22:24:36,264 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:36,265 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T22:24:36,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:36,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:36,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:36,265 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:36,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:36,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:36,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742092_1268 (size=12151) 2024-11-20T22:24:36,417 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:36,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141536415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:36,419 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:36,419 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T22:24:36,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:36,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:36,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:36,419 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:36,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:36,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:36,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T22:24:36,434 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:36,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141536430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:36,434 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:36,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141536430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:36,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:36,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:36,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141536431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:36,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141536431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:36,572 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:36,574 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T22:24:36,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:36,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:36,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:36,574 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:36,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:36,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:36,698 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/07d0206eec26492ea402976370c5c759 2024-11-20T22:24:36,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/b7b5b2610bc246ce938cd93f67a7be9d is 50, key is test_row_0/C:col10/1732141475765/Put/seqid=0 2024-11-20T22:24:36,731 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:36,739 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T22:24:36,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:36,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:36,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:36,739 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:36,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:36,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:36,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742093_1269 (size=12151) 2024-11-20T22:24:36,764 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/b7b5b2610bc246ce938cd93f67a7be9d 2024-11-20T22:24:36,779 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/0a07d1359c144e61aa260b491521685e as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/0a07d1359c144e61aa260b491521685e 2024-11-20T22:24:36,795 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/0a07d1359c144e61aa260b491521685e, entries=200, sequenceid=201, filesize=14.2 K 2024-11-20T22:24:36,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/07d0206eec26492ea402976370c5c759 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/07d0206eec26492ea402976370c5c759 2024-11-20T22:24:36,809 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/07d0206eec26492ea402976370c5c759, entries=150, sequenceid=201, filesize=11.9 K 2024-11-20T22:24:36,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/b7b5b2610bc246ce938cd93f67a7be9d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/b7b5b2610bc246ce938cd93f67a7be9d 2024-11-20T22:24:36,828 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/b7b5b2610bc246ce938cd93f67a7be9d, entries=150, sequenceid=201, filesize=11.9 K 2024-11-20T22:24:36,830 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 1ae43dfbfefd3b112decface0ed50cc2 in 1064ms, sequenceid=201, compaction requested=true 2024-11-20T22:24:36,830 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:36,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:36,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:36,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:36,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:24:36,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:36,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-20T22:24:36,831 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:36,831 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:36,832 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:36,832 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/C is initiating minor compaction (all files) 2024-11-20T22:24:36,832 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/C in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:36,833 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/2a1b76bd4abc49388605aeac2df11bad, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/0e2705a549834475bf51a64c6e86640d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/b7b5b2610bc246ce938cd93f67a7be9d] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=35.9 K 2024-11-20T22:24:36,833 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41575 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:36,833 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/A is initiating minor compaction (all files) 2024-11-20T22:24:36,833 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a1b76bd4abc49388605aeac2df11bad, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732141473977 2024-11-20T22:24:36,833 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/A in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:36,833 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/3358eda11ac34273894878aa1e6db6be, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/13b11c0fb3b24ebaba32ab8b7f01fcef, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/0a07d1359c144e61aa260b491521685e] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=40.6 K 2024-11-20T22:24:36,834 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3358eda11ac34273894878aa1e6db6be, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732141473977 2024-11-20T22:24:36,834 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e2705a549834475bf51a64c6e86640d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732141475021 2024-11-20T22:24:36,834 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13b11c0fb3b24ebaba32ab8b7f01fcef, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732141475021 2024-11-20T22:24:36,835 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting b7b5b2610bc246ce938cd93f67a7be9d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732141475124 2024-11-20T22:24:36,835 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a07d1359c144e61aa260b491521685e, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732141475124 2024-11-20T22:24:36,877 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#A#compaction#222 average throughput is 0.82 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:36,877 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/92a66bcc40c74a37b33eaf13591dbde9 is 50, key is test_row_0/A:col10/1732141475765/Put/seqid=0 2024-11-20T22:24:36,879 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#C#compaction#223 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:36,880 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/ded08eb651e24aef87ce033e2965ec44 is 50, key is test_row_0/C:col10/1732141475765/Put/seqid=0 2024-11-20T22:24:36,907 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:36,907 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T22:24:36,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:36,907 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 1ae43dfbfefd3b112decface0ed50cc2 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T22:24:36,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=A 2024-11-20T22:24:36,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:36,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=B 2024-11-20T22:24:36,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:36,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=C 2024-11-20T22:24:36,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:36,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742095_1271 (size=12595) 2024-11-20T22:24:36,933 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/92a66bcc40c74a37b33eaf13591dbde9 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/92a66bcc40c74a37b33eaf13591dbde9 2024-11-20T22:24:36,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:36,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:36,944 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/A of 1ae43dfbfefd3b112decface0ed50cc2 into 92a66bcc40c74a37b33eaf13591dbde9(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:36,944 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:36,944 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/A, priority=13, startTime=1732141476830; duration=0sec 2024-11-20T22:24:36,946 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:36,946 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:A 2024-11-20T22:24:36,946 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:36,947 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:36,947 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/B is initiating minor compaction (all files) 2024-11-20T22:24:36,947 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/B in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:36,947 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/44c98c57b3bc4c47b69cea1a6bba284e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/06f7ffb79daf474f85f91fdec4e045c0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/07d0206eec26492ea402976370c5c759] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=35.9 K 2024-11-20T22:24:36,950 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44c98c57b3bc4c47b69cea1a6bba284e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732141473977 2024-11-20T22:24:36,957 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06f7ffb79daf474f85f91fdec4e045c0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732141475021 2024-11-20T22:24:36,959 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 07d0206eec26492ea402976370c5c759, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732141475124 2024-11-20T22:24:36,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742094_1270 (size=12595) 2024-11-20T22:24:36,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/40aa86b85a8d448d96729d4dc30dd354 is 50, key is test_row_0/A:col10/1732141475795/Put/seqid=0 2024-11-20T22:24:36,984 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#B#compaction#225 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:36,985 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/b1c93c376d254d83920437c8c5a1955b is 50, key is test_row_0/B:col10/1732141475765/Put/seqid=0 2024-11-20T22:24:37,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742096_1272 (size=12151) 2024-11-20T22:24:37,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141537011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:37,027 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141537014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:37,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141537012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:37,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141537027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:37,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141537027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:37,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742097_1273 (size=12595) 2024-11-20T22:24:37,070 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/b1c93c376d254d83920437c8c5a1955b as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/b1c93c376d254d83920437c8c5a1955b 2024-11-20T22:24:37,077 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/B of 1ae43dfbfefd3b112decface0ed50cc2 into b1c93c376d254d83920437c8c5a1955b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:37,077 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:37,077 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/B, priority=13, startTime=1732141476830; duration=0sec 2024-11-20T22:24:37,077 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:37,077 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:B 2024-11-20T22:24:37,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141537128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:37,132 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141537128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:37,132 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141537130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:37,137 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141537132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:37,143 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141537140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:37,337 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141537334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:37,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141537336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:37,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141537336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:37,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141537343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:37,355 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141537351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:37,380 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/ded08eb651e24aef87ce033e2965ec44 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/ded08eb651e24aef87ce033e2965ec44 2024-11-20T22:24:37,395 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/C of 1ae43dfbfefd3b112decface0ed50cc2 into ded08eb651e24aef87ce033e2965ec44(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:37,395 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:37,395 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/C, priority=13, startTime=1732141476830; duration=0sec 2024-11-20T22:24:37,395 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:37,395 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:C 2024-11-20T22:24:37,412 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/40aa86b85a8d448d96729d4dc30dd354 2024-11-20T22:24:37,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/c8b252f1743747688317931515e48093 is 50, key is test_row_0/B:col10/1732141475795/Put/seqid=0 2024-11-20T22:24:37,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T22:24:37,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742098_1274 (size=12151) 2024-11-20T22:24:37,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141537640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:37,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141537641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:37,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141537641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:37,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141537662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:37,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141537663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:37,878 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/c8b252f1743747688317931515e48093 2024-11-20T22:24:37,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/0519d94d37f649d5864287dac308d2d5 is 50, key is test_row_0/C:col10/1732141475795/Put/seqid=0 2024-11-20T22:24:37,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742099_1275 (size=12151) 2024-11-20T22:24:38,146 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:38,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141538145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:38,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:38,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141538155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:38,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:38,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141538162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:38,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:38,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141538167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:38,177 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:38,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141538175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:38,312 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/0519d94d37f649d5864287dac308d2d5 2024-11-20T22:24:38,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/40aa86b85a8d448d96729d4dc30dd354 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/40aa86b85a8d448d96729d4dc30dd354 2024-11-20T22:24:38,341 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/40aa86b85a8d448d96729d4dc30dd354, entries=150, sequenceid=211, filesize=11.9 K 2024-11-20T22:24:38,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/c8b252f1743747688317931515e48093 as 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/c8b252f1743747688317931515e48093 2024-11-20T22:24:38,359 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/c8b252f1743747688317931515e48093, entries=150, sequenceid=211, filesize=11.9 K 2024-11-20T22:24:38,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/0519d94d37f649d5864287dac308d2d5 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/0519d94d37f649d5864287dac308d2d5 2024-11-20T22:24:38,371 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/0519d94d37f649d5864287dac308d2d5, entries=150, sequenceid=211, filesize=11.9 K 2024-11-20T22:24:38,372 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 1ae43dfbfefd3b112decface0ed50cc2 in 1465ms, sequenceid=211, compaction requested=false 2024-11-20T22:24:38,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:38,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:38,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-20T22:24:38,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-20T22:24:38,375 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-20T22:24:38,376 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0540 sec 2024-11-20T22:24:38,378 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 3.0600 sec 2024-11-20T22:24:39,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:39,159 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1ae43dfbfefd3b112decface0ed50cc2 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T22:24:39,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=A 2024-11-20T22:24:39,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:39,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=B 2024-11-20T22:24:39,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:39,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=C 2024-11-20T22:24:39,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:39,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:39,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141539172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:39,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:39,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141539173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:39,177 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/01e22282941b47dfa9832a0475b43ae2 is 50, key is test_row_0/A:col10/1732141479156/Put/seqid=0 2024-11-20T22:24:39,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:39,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141539174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:39,180 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:39,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141539176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:39,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:39,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141539180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:39,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742100_1276 (size=12151) 2024-11-20T22:24:39,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:39,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141539281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:39,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:39,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141539286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:39,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:39,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141539287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:39,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T22:24:39,435 INFO [Thread-1082 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-20T22:24:39,440 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:39,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-20T22:24:39,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T22:24:39,442 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:39,451 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:39,451 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:39,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:39,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141539489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:39,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:39,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141539492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:39,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T22:24:39,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:39,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141539496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:39,615 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:39,615 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T22:24:39,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:39,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:39,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:39,616 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:39,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:39,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:39,631 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/01e22282941b47dfa9832a0475b43ae2 2024-11-20T22:24:39,681 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/e25bc8a7e3724d18b96ad6fbe9bc1a50 is 50, key is test_row_0/B:col10/1732141479156/Put/seqid=0 2024-11-20T22:24:39,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742101_1277 (size=12151) 2024-11-20T22:24:39,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T22:24:39,787 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:39,788 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T22:24:39,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:39,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:39,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:39,788 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:39,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:39,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:39,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:39,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141539847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:39,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:39,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141539847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:39,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:39,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141539851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:39,942 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:39,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T22:24:39,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:39,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:39,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:39,948 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:39,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:39,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:40,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T22:24:40,102 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:40,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T22:24:40,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:40,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:40,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:40,103 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:40,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:40,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
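The blocks above show the master repeatedly dispatching flush procedure pid=80 to the region server, where FlushRegionCallable gives up with "Unable to complete flush ... as already flushing" because a flush of this region is already in flight, and the master then re-dispatches the procedure. One common way such a flush procedure is initiated is an explicit Admin flush request; the sketch below assumes that path (whether this test issues the flush this way is not shown in the log), and the class name is illustrative.

    import java.io.IOException;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
        public static void main(String[] args) throws IOException {
            // Uses the default client configuration on the classpath (hbase-site.xml).
            try (Connection conn = ConnectionFactory.createConnection();
                 Admin admin = conn.getAdmin()) {
                // Asks for a flush of every region of the table. The flush runs
                // asynchronously on the server side, which is consistent with the
                // remote FlushRegionCallable procedure (pid=80) visible above.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }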
2024-11-20T22:24:40,113 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/e25bc8a7e3724d18b96ad6fbe9bc1a50 2024-11-20T22:24:40,174 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/731a635790464fc883c130b5222af593 is 50, key is test_row_0/C:col10/1732141479156/Put/seqid=0 2024-11-20T22:24:40,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742102_1278 (size=12151) 2024-11-20T22:24:40,191 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/731a635790464fc883c130b5222af593 2024-11-20T22:24:40,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/01e22282941b47dfa9832a0475b43ae2 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/01e22282941b47dfa9832a0475b43ae2 2024-11-20T22:24:40,216 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/01e22282941b47dfa9832a0475b43ae2, entries=150, sequenceid=241, filesize=11.9 K 2024-11-20T22:24:40,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/e25bc8a7e3724d18b96ad6fbe9bc1a50 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/e25bc8a7e3724d18b96ad6fbe9bc1a50 2024-11-20T22:24:40,244 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/e25bc8a7e3724d18b96ad6fbe9bc1a50, entries=150, sequenceid=241, filesize=11.9 K 2024-11-20T22:24:40,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/731a635790464fc883c130b5222af593 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/731a635790464fc883c130b5222af593 2024-11-20T22:24:40,252 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/731a635790464fc883c130b5222af593, entries=150, sequenceid=241, filesize=11.9 K 2024-11-20T22:24:40,253 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 1ae43dfbfefd3b112decface0ed50cc2 in 1095ms, sequenceid=241, compaction requested=true 2024-11-20T22:24:40,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:40,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:40,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:40,254 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:40,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:40,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:40,254 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:40,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:40,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:40,256 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:40,256 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/A is initiating minor compaction (all files) 2024-11-20T22:24:40,256 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/A in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:40,256 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/92a66bcc40c74a37b33eaf13591dbde9, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/40aa86b85a8d448d96729d4dc30dd354, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/01e22282941b47dfa9832a0475b43ae2] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=36.0 K 2024-11-20T22:24:40,256 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:40,256 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/B is initiating minor compaction (all files) 2024-11-20T22:24:40,256 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/B in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:40,256 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/b1c93c376d254d83920437c8c5a1955b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/c8b252f1743747688317931515e48093, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/e25bc8a7e3724d18b96ad6fbe9bc1a50] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=36.0 K 2024-11-20T22:24:40,257 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 92a66bcc40c74a37b33eaf13591dbde9, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732141475124 2024-11-20T22:24:40,257 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting b1c93c376d254d83920437c8c5a1955b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732141475124 2024-11-20T22:24:40,257 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 40aa86b85a8d448d96729d4dc30dd354, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732141475780 2024-11-20T22:24:40,259 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting c8b252f1743747688317931515e48093, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732141475780 2024-11-20T22:24:40,259 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 01e22282941b47dfa9832a0475b43ae2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1732141477009 2024-11-20T22:24:40,260 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting e25bc8a7e3724d18b96ad6fbe9bc1a50, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1732141477009 2024-11-20T22:24:40,261 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:40,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T22:24:40,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:40,264 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 1ae43dfbfefd3b112decface0ed50cc2 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T22:24:40,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=A 2024-11-20T22:24:40,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:40,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=B 2024-11-20T22:24:40,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:40,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=C 2024-11-20T22:24:40,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:40,281 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#B#compaction#231 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:40,282 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/d31692a3ea01408c9d6b7ab39694d9bf is 50, key is test_row_0/B:col10/1732141479156/Put/seqid=0 2024-11-20T22:24:40,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/627a10f73530414a9ea40dfec773c064 is 50, key is test_row_0/A:col10/1732141479171/Put/seqid=0 2024-11-20T22:24:40,312 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#A#compaction#233 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:40,313 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/bcb98197c5a0491485812ba7cf82e614 is 50, key is test_row_0/A:col10/1732141479156/Put/seqid=0 2024-11-20T22:24:40,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:40,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:40,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742104_1280 (size=12151) 2024-11-20T22:24:40,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742105_1281 (size=12697) 2024-11-20T22:24:40,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742103_1279 (size=12697) 2024-11-20T22:24:40,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:40,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141540410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:40,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:40,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141540413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:40,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:40,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141540413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:40,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:40,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141540514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:40,523 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:40,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141540521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:40,526 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:40,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141540521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:40,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T22:24:40,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:40,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141540717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:40,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:40,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141540727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:40,730 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:40,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141540727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:40,755 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/627a10f73530414a9ea40dfec773c064 2024-11-20T22:24:40,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/c0567ce1515943f79bafaf6870594c81 is 50, key is test_row_0/B:col10/1732141479171/Put/seqid=0 2024-11-20T22:24:40,803 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/bcb98197c5a0491485812ba7cf82e614 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/bcb98197c5a0491485812ba7cf82e614 2024-11-20T22:24:40,811 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/d31692a3ea01408c9d6b7ab39694d9bf as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/d31692a3ea01408c9d6b7ab39694d9bf 2024-11-20T22:24:40,814 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/A of 1ae43dfbfefd3b112decface0ed50cc2 into bcb98197c5a0491485812ba7cf82e614(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
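The flush output above contains cells keyed like test_row_0/A:col10 and test_row_0/B:col10, i.e. rows test_row_N on table TestAcidGuarantees with column families A, B and C under qualifier col10. A minimal sketch of the kind of client write that produces such cells is shown below; it assumes a single Put carrying one cell per family, and the class name and value are illustrative (the test's actual write pattern is not shown in the log).

    import java.io.IOException;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TestRowPutSketch {
        public static void main(String[] args) throws IOException {
            try (Connection conn = ConnectionFactory.createConnection();
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                // One Put with a cell for each of the three families seen in the log.
                Put put = new Put(Bytes.toBytes("test_row_0"));
                for (String family : new String[] {"A", "B", "C"}) {
                    put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"),
                        Bytes.toBytes("value"));
                }
                // A RegionTooBusyException thrown here (memstore over its blocking
                // limit) is retried by the client per hbase.client.retries.number.
                table.put(put);
            }
        }
    }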
2024-11-20T22:24:40,814 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:40,815 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/A, priority=13, startTime=1732141480254; duration=0sec 2024-11-20T22:24:40,815 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:40,815 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:A 2024-11-20T22:24:40,815 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:40,822 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:40,822 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/C is initiating minor compaction (all files) 2024-11-20T22:24:40,822 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/C in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:40,822 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/ded08eb651e24aef87ce033e2965ec44, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/0519d94d37f649d5864287dac308d2d5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/731a635790464fc883c130b5222af593] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=36.0 K 2024-11-20T22:24:40,824 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/B of 1ae43dfbfefd3b112decface0ed50cc2 into d31692a3ea01408c9d6b7ab39694d9bf(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
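The compactions above were queued automatically by MemStoreFlusher once each store held three eligible files (three roughly 12 K hfiles rewritten into one ~12.4 K file per store). Compactions can also be requested explicitly through the Admin API; the sketch below assumes a per-family request against the same table and is not something this log shows the test doing. The class name is illustrative.

    import java.io.IOException;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactionRequestSketch {
        public static void main(String[] args) throws IOException {
            try (Connection conn = ConnectionFactory.createConnection();
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                // Queue a compaction for one column family; this is asynchronous,
                // like the flusher-triggered compactions in the log above.
                admin.compact(table, Bytes.toBytes("A"));
                // Or rewrite all files in every store of the table:
                admin.majorCompact(table);
            }
        }
    }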
2024-11-20T22:24:40,824 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:40,824 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/B, priority=13, startTime=1732141480254; duration=0sec 2024-11-20T22:24:40,824 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:40,824 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:B 2024-11-20T22:24:40,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742106_1282 (size=12151) 2024-11-20T22:24:40,831 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting ded08eb651e24aef87ce033e2965ec44, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732141475124 2024-11-20T22:24:40,832 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0519d94d37f649d5864287dac308d2d5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732141475780 2024-11-20T22:24:40,832 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/c0567ce1515943f79bafaf6870594c81 2024-11-20T22:24:40,832 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 731a635790464fc883c130b5222af593, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1732141477009 2024-11-20T22:24:40,861 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#C#compaction#235 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:40,861 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/99ca2cab4bf747c3843e548380b5ba5a is 50, key is test_row_0/C:col10/1732141479156/Put/seqid=0 2024-11-20T22:24:40,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/695dd35a7c424a49bdce9ce2c3fa679f is 50, key is test_row_0/C:col10/1732141479171/Put/seqid=0 2024-11-20T22:24:40,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742107_1283 (size=12697) 2024-11-20T22:24:40,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742108_1284 (size=12151) 2024-11-20T22:24:40,934 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/99ca2cab4bf747c3843e548380b5ba5a as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/99ca2cab4bf747c3843e548380b5ba5a 2024-11-20T22:24:40,951 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/C of 1ae43dfbfefd3b112decface0ed50cc2 into 99ca2cab4bf747c3843e548380b5ba5a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:40,951 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:40,951 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/C, priority=13, startTime=1732141480254; duration=0sec 2024-11-20T22:24:40,951 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:40,951 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:C 2024-11-20T22:24:41,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:41,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141541023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:41,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:41,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141541035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:41,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:41,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141541042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:41,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:41,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141541187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:41,190 DEBUG [Thread-1074 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4163 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., hostname=6365a1e51efd,44631,1732141399950, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:41,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:41,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141541199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:41,203 DEBUG [Thread-1078 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4177 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., hostname=6365a1e51efd,44631,1732141399950, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:41,339 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=251 (bloomFilter=true), 
to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/695dd35a7c424a49bdce9ce2c3fa679f 2024-11-20T22:24:41,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/627a10f73530414a9ea40dfec773c064 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/627a10f73530414a9ea40dfec773c064 2024-11-20T22:24:41,364 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/627a10f73530414a9ea40dfec773c064, entries=150, sequenceid=251, filesize=11.9 K 2024-11-20T22:24:41,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/c0567ce1515943f79bafaf6870594c81 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/c0567ce1515943f79bafaf6870594c81 2024-11-20T22:24:41,370 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/c0567ce1515943f79bafaf6870594c81, entries=150, sequenceid=251, filesize=11.9 K 2024-11-20T22:24:41,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/695dd35a7c424a49bdce9ce2c3fa679f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/695dd35a7c424a49bdce9ce2c3fa679f 2024-11-20T22:24:41,382 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/695dd35a7c424a49bdce9ce2c3fa679f, entries=150, sequenceid=251, filesize=11.9 K 2024-11-20T22:24:41,383 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for 1ae43dfbfefd3b112decface0ed50cc2 in 1119ms, sequenceid=251, compaction requested=false 2024-11-20T22:24:41,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 
1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:41,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:41,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-20T22:24:41,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-20T22:24:41,389 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-20T22:24:41,389 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9340 sec 2024-11-20T22:24:41,392 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.9500 sec 2024-11-20T22:24:41,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:41,533 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1ae43dfbfefd3b112decface0ed50cc2 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-20T22:24:41,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=A 2024-11-20T22:24:41,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:41,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=B 2024-11-20T22:24:41,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:41,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=C 2024-11-20T22:24:41,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:41,538 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/6a63b67d06994797a4298190dc81351c is 50, key is test_row_0/A:col10/1732141480412/Put/seqid=0 2024-11-20T22:24:41,547 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:41,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141541545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:41,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T22:24:41,548 INFO [Thread-1082 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-20T22:24:41,548 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:41,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141541546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:41,549 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:41,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-20T22:24:41,550 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:41,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T22:24:41,551 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:41,551 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:41,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:41,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141541552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:41,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742109_1285 (size=12301) 2024-11-20T22:24:41,560 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/6a63b67d06994797a4298190dc81351c 2024-11-20T22:24:41,576 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/1d1b9b09080b40b8b6b8d5fc0c8ea687 is 50, key is test_row_0/B:col10/1732141480412/Put/seqid=0 2024-11-20T22:24:41,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742110_1286 (size=12301) 2024-11-20T22:24:41,586 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/1d1b9b09080b40b8b6b8d5fc0c8ea687 2024-11-20T22:24:41,608 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/4dde0fb747e24cad9ca19a3bc2b7a45d is 50, key is test_row_0/C:col10/1732141480412/Put/seqid=0 2024-11-20T22:24:41,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742111_1287 (size=12301) 2024-11-20T22:24:41,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T22:24:41,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:41,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141541649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:41,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:41,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141541650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:41,703 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:41,703 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T22:24:41,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:41,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:41,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:41,704 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:41,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:41,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:41,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T22:24:41,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:41,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141541854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:41,857 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:41,858 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T22:24:41,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:41,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:41,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:41,858 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:41,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:41,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:41,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:41,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141541856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:42,010 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:42,011 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T22:24:42,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:42,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:42,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:42,011 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
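The entries above show writers being rejected with RegionTooBusyException (Over memstore limit=512.0 K) while the flush procedure pid=82 keeps failing with "already flushing". The stock HBase client normally retries this exception on its own; the sketch below only illustrates the back-off-and-retry idea for a single put against the TestAcidGuarantees table, assuming client retries are disabled or already exhausted, and is not the test's own write path.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;                              // initial back-off, arbitrary choice
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);                                // rejected while the memstore is blocked
              break;
            } catch (RegionTooBusyException busy) {
              // The region is above its blocking memstore size; wait for the flush to drain it.
              Thread.sleep(backoffMs);
              backoffMs *= 2;                                // exponential back-off between attempts
            }
          }
        }
      }
    }

Depending on retry settings, the busy exception may also surface wrapped in a retries-exhausted exception rather than directly as shown here.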
2024-11-20T22:24:42,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:42,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:42,050 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/4dde0fb747e24cad9ca19a3bc2b7a45d 2024-11-20T22:24:42,055 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/6a63b67d06994797a4298190dc81351c as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/6a63b67d06994797a4298190dc81351c 2024-11-20T22:24:42,059 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/6a63b67d06994797a4298190dc81351c, entries=150, sequenceid=282, filesize=12.0 K 2024-11-20T22:24:42,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/1d1b9b09080b40b8b6b8d5fc0c8ea687 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/1d1b9b09080b40b8b6b8d5fc0c8ea687 2024-11-20T22:24:42,064 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/1d1b9b09080b40b8b6b8d5fc0c8ea687, entries=150, sequenceid=282, filesize=12.0 K 2024-11-20T22:24:42,065 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/4dde0fb747e24cad9ca19a3bc2b7a45d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/4dde0fb747e24cad9ca19a3bc2b7a45d 2024-11-20T22:24:42,072 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/4dde0fb747e24cad9ca19a3bc2b7a45d, entries=150, sequenceid=282, filesize=12.0 K 2024-11-20T22:24:42,078 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 1ae43dfbfefd3b112decface0ed50cc2 in 545ms, sequenceid=282, compaction requested=true 2024-11-20T22:24:42,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:42,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
1ae43dfbfefd3b112decface0ed50cc2:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:42,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:42,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:42,078 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:42,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:42,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:42,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:24:42,078 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:42,079 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:42,080 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/B is initiating minor compaction (all files) 2024-11-20T22:24:42,080 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/B in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
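At this point both compaction threads select all three eligible store files per family (roughly 12 K each, 37149 bytes total). As a rough illustration of the ratio test that exploring/ratio-based selection applies to such a window, the standalone sketch below accepts a candidate window only when no single file dwarfs the rest; it is illustrative only and does not mirror the actual ExploringCompactionPolicy code.

    import java.util.List;

    /** Simplified ratio check in the spirit of ratio-based compaction selection. */
    public class RatioSelectionSketch {

      /** Accept the window only if every file is at most ratio * (sum of the other files). */
      static boolean withinRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          if (size > (total - size) * ratio) {
            return false;                      // one file is disproportionately large
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Sizes roughly matching the log: 12.4 K + 11.9 K + 12.0 K, about 37149 bytes total.
        List<Long> window = List.of(12_700L, 12_200L, 12_249L);
        System.out.println("select all 3 files: " + withinRatio(window, 1.2));
      }
    }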
2024-11-20T22:24:42,080 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/d31692a3ea01408c9d6b7ab39694d9bf, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/c0567ce1515943f79bafaf6870594c81, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/1d1b9b09080b40b8b6b8d5fc0c8ea687] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=36.3 K 2024-11-20T22:24:42,080 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting d31692a3ea01408c9d6b7ab39694d9bf, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1732141477009 2024-11-20T22:24:42,080 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting c0567ce1515943f79bafaf6870594c81, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732141479171 2024-11-20T22:24:42,081 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d1b9b09080b40b8b6b8d5fc0c8ea687, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732141480399 2024-11-20T22:24:42,091 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:42,091 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/A is initiating minor compaction (all files) 2024-11-20T22:24:42,091 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/A in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:42,091 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/bcb98197c5a0491485812ba7cf82e614, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/627a10f73530414a9ea40dfec773c064, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/6a63b67d06994797a4298190dc81351c] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=36.3 K 2024-11-20T22:24:42,092 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#B#compaction#240 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:42,093 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting bcb98197c5a0491485812ba7cf82e614, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1732141477009 2024-11-20T22:24:42,093 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/2ff3d8e6630a44cd9ffb362d27439b37 is 50, key is test_row_0/B:col10/1732141480412/Put/seqid=0 2024-11-20T22:24:42,094 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 627a10f73530414a9ea40dfec773c064, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732141479171 2024-11-20T22:24:42,095 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a63b67d06994797a4298190dc81351c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732141480399 2024-11-20T22:24:42,116 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#A#compaction#241 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:42,116 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/1d9cd2ed5151436bb122bb66eadf9053 is 50, key is test_row_0/A:col10/1732141480412/Put/seqid=0 2024-11-20T22:24:42,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742112_1288 (size=12949) 2024-11-20T22:24:42,152 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/2ff3d8e6630a44cd9ffb362d27439b37 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/2ff3d8e6630a44cd9ffb362d27439b37 2024-11-20T22:24:42,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T22:24:42,164 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:42,164 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/B of 1ae43dfbfefd3b112decface0ed50cc2 into 2ff3d8e6630a44cd9ffb362d27439b37(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
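The PressureAwareThroughputController entries above report each compaction's average write rate against a 50.00 MB/second total limit. The toy throttle below shows only the general sleep-when-ahead idea behind such rate limiting; the class and method names are made up for the sketch and do not reflect the real controller.

    /** Toy throughput throttle: sleep whenever the writer gets ahead of the allowed rate. */
    public class ThroughputThrottleSketch {
      private final double maxBytesPerSecond;
      private final long startNanos = System.nanoTime();
      private long bytesWritten;

      ThroughputThrottleSketch(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
      }

      /** Call after writing a chunk; blocks until the cumulative rate drops under the limit. */
      void control(long chunkBytes) throws InterruptedException {
        bytesWritten += chunkBytes;
        double elapsedSeconds = (System.nanoTime() - startNanos) / 1e9;
        double minimumSeconds = bytesWritten / maxBytesPerSecond;  // time required at the cap
        double aheadBySeconds = minimumSeconds - elapsedSeconds;
        if (aheadBySeconds > 0) {
          Thread.sleep((long) (aheadBySeconds * 1000));            // slow the writer down
        }
      }

      public static void main(String[] args) throws InterruptedException {
        ThroughputThrottleSketch throttle = new ThroughputThrottleSketch(50 * 1024 * 1024); // 50 MB/s
        for (int i = 0; i < 10; i++) {
          // ... write a 4 MB chunk of compaction output here ...
          throttle.control(4 * 1024 * 1024);
        }
      }
    }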
2024-11-20T22:24:42,164 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:42,164 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/B, priority=13, startTime=1732141482078; duration=0sec 2024-11-20T22:24:42,164 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:42,164 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:B 2024-11-20T22:24:42,164 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:42,165 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T22:24:42,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:42,165 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 1ae43dfbfefd3b112decface0ed50cc2 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-20T22:24:42,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=A 2024-11-20T22:24:42,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:42,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=B 2024-11-20T22:24:42,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:42,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=C 2024-11-20T22:24:42,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:42,169 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:42,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742113_1289 (size=12949) 
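Both the flushes and the compactions in this run write their output under the region's .tmp directory first and then commit it into the column-family directory (the "Committing ... as ..." lines). The sketch below shows that write-then-rename pattern with the plain Hadoop FileSystem API; the HDFS URL matches the log, but the shortened region path and the empty placeholder payload are assumptions made for the example.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenRenameSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:41121");   // NameNode seen in the log paths
        FileSystem fs = FileSystem.get(conf);

        // Shortened, illustrative region layout; the real test paths live under /user/jenkins/test-data.
        Path regionDir = new Path("/hbase/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2");
        Path tmpFile = new Path(regionDir, ".tmp/B/2ff3d8e6630a44cd9ffb362d27439b37");
        Path storeFile = new Path(regionDir, "B/2ff3d8e6630a44cd9ffb362d27439b37");

        // 1. Write the whole file under .tmp so readers never observe a partial store file.
        try (FSDataOutputStream out = fs.create(tmpFile)) {
          out.write(new byte[0]);                             // placeholder for the real HFile bytes
        }

        // 2. Commit by renaming into the column-family directory; within one filesystem the
        //    rename is atomic, so the store sees either the finished file or nothing.
        fs.mkdirs(storeFile.getParent());
        if (!fs.rename(tmpFile, storeFile)) {
          throw new java.io.IOException("could not commit " + tmpFile + " as " + storeFile);
        }
      }
    }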
2024-11-20T22:24:42,170 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/C is initiating minor compaction (all files) 2024-11-20T22:24:42,170 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/C in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:42,171 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/99ca2cab4bf747c3843e548380b5ba5a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/695dd35a7c424a49bdce9ce2c3fa679f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/4dde0fb747e24cad9ca19a3bc2b7a45d] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=36.3 K 2024-11-20T22:24:42,175 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 99ca2cab4bf747c3843e548380b5ba5a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1732141477009 2024-11-20T22:24:42,175 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 695dd35a7c424a49bdce9ce2c3fa679f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732141479171 2024-11-20T22:24:42,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
as already flushing 2024-11-20T22:24:42,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:42,178 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/1d9cd2ed5151436bb122bb66eadf9053 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1d9cd2ed5151436bb122bb66eadf9053 2024-11-20T22:24:42,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/1224d8187c25423ca3729bdd983411b9 is 50, key is test_row_0/A:col10/1732141481544/Put/seqid=0 2024-11-20T22:24:42,183 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 4dde0fb747e24cad9ca19a3bc2b7a45d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732141480399 2024-11-20T22:24:42,204 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#C#compaction#243 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:42,205 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/3435af535e21428e97820cf67ea2b6f0 is 50, key is test_row_0/C:col10/1732141480412/Put/seqid=0 2024-11-20T22:24:42,206 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/A of 1ae43dfbfefd3b112decface0ed50cc2 into 1d9cd2ed5151436bb122bb66eadf9053(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:42,206 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:42,206 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/A, priority=13, startTime=1732141482078; duration=0sec 2024-11-20T22:24:42,206 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:42,206 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:A 2024-11-20T22:24:42,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742114_1290 (size=12301) 2024-11-20T22:24:42,209 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/1224d8187c25423ca3729bdd983411b9 2024-11-20T22:24:42,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/16e5f925fbc3437da3c8e74e501ec131 is 50, key is test_row_0/B:col10/1732141481544/Put/seqid=0 2024-11-20T22:24:42,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742115_1291 (size=12949) 2024-11-20T22:24:42,259 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/3435af535e21428e97820cf67ea2b6f0 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/3435af535e21428e97820cf67ea2b6f0 2024-11-20T22:24:42,267 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/C of 1ae43dfbfefd3b112decface0ed50cc2 into 3435af535e21428e97820cf67ea2b6f0(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
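Every RegionTooBusyException in this section reports the same threshold, Over memstore limit=512.0 K, which is the region's memstore flush size multiplied by the block multiplier; the test presumably shrinks the flush size to force this behaviour. The sketch below only illustrates the two standard settings with assumed values that would reproduce a 512 K limit, and is not the test's actual configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Flush a region's memstore once it reaches this many bytes (assumed 128 KB here,
        // which with a multiplier of 4 gives the 512 K blocking limit seen in the log).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);

        // Block new updates (RegionTooBusyException) once the memstore reaches
        // flush.size * multiplier, until flushes bring it back under the limit.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking memstore size = " + blockingLimit + " bytes"); // 524288 = 512 K
      }
    }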
2024-11-20T22:24:42,267 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:42,267 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/C, priority=13, startTime=1732141482078; duration=0sec 2024-11-20T22:24:42,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742116_1292 (size=12301) 2024-11-20T22:24:42,267 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:42,267 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:C 2024-11-20T22:24:42,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:42,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141542275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:42,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:42,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141542287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:42,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:42,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141542388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:42,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:42,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141542390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:42,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:42,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141542559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:42,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:42,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141542592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:42,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:42,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141542597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:42,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T22:24:42,668 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/16e5f925fbc3437da3c8e74e501ec131 2024-11-20T22:24:42,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/1219d20d7b544331a7dd42bb3c07cc6e is 50, key is test_row_0/C:col10/1732141481544/Put/seqid=0 2024-11-20T22:24:42,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742117_1293 (size=12301) 2024-11-20T22:24:42,707 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/1219d20d7b544331a7dd42bb3c07cc6e 2024-11-20T22:24:42,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/1224d8187c25423ca3729bdd983411b9 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1224d8187c25423ca3729bdd983411b9 2024-11-20T22:24:42,722 INFO 
[RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1224d8187c25423ca3729bdd983411b9, entries=150, sequenceid=292, filesize=12.0 K 2024-11-20T22:24:42,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/16e5f925fbc3437da3c8e74e501ec131 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/16e5f925fbc3437da3c8e74e501ec131 2024-11-20T22:24:42,729 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/16e5f925fbc3437da3c8e74e501ec131, entries=150, sequenceid=292, filesize=12.0 K 2024-11-20T22:24:42,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/1219d20d7b544331a7dd42bb3c07cc6e as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/1219d20d7b544331a7dd42bb3c07cc6e 2024-11-20T22:24:42,736 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/1219d20d7b544331a7dd42bb3c07cc6e, entries=150, sequenceid=292, filesize=12.0 K 2024-11-20T22:24:42,736 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 1ae43dfbfefd3b112decface0ed50cc2 in 571ms, sequenceid=292, compaction requested=false 2024-11-20T22:24:42,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:42,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
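The next entries report pid=82 completing and its parent FlushTableProcedure (pid=81) finishing, which closes out the table flush the test requested. A client asks for such a flush through the Admin API, as sketched below; connection settings are assumed to come from the usual hbase-site.xml on the classpath.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the master to flush every region of the table; in this log that request is
          // coordinated as a FlushTableProcedure with a per-region FlushRegionProcedure child.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }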
2024-11-20T22:24:42,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-20T22:24:42,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-20T22:24:42,744 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-20T22:24:42,744 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1900 sec 2024-11-20T22:24:42,746 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.1960 sec 2024-11-20T22:24:42,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:42,903 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1ae43dfbfefd3b112decface0ed50cc2 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-20T22:24:42,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=A 2024-11-20T22:24:42,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:42,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=B 2024-11-20T22:24:42,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:42,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=C 2024-11-20T22:24:42,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:42,907 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/f60da12f745f4439b58a4a05807fd7cd is 50, key is test_row_0/A:col10/1732141482277/Put/seqid=0 2024-11-20T22:24:42,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:42,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141542907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:42,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:42,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141542911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:42,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742118_1294 (size=14741) 2024-11-20T22:24:43,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:43,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141543015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:43,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:43,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141543022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:43,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:43,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141543221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:43,227 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:43,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141543227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:43,333 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/f60da12f745f4439b58a4a05807fd7cd 2024-11-20T22:24:43,351 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/4ab55897be4b40548ca4feeb5e165f0f is 50, key is test_row_0/B:col10/1732141482277/Put/seqid=0 2024-11-20T22:24:43,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742119_1295 (size=12301) 2024-11-20T22:24:43,527 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:43,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141543524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:43,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:43,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141543535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:43,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T22:24:43,666 INFO [Thread-1082 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-20T22:24:43,675 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:43,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-20T22:24:43,677 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:43,678 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:43,678 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:43,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T22:24:43,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T22:24:43,783 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/4ab55897be4b40548ca4feeb5e165f0f 2024-11-20T22:24:43,834 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:43,834 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 
2024-11-20T22:24:43,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:43,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:43,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:43,834 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:43,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:43,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:43,840 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/4c47f3c3b67942269507a3043a4ff836 is 50, key is test_row_0/C:col10/1732141482277/Put/seqid=0 2024-11-20T22:24:43,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742120_1296 (size=12301) 2024-11-20T22:24:43,863 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/4c47f3c3b67942269507a3043a4ff836 2024-11-20T22:24:43,870 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/f60da12f745f4439b58a4a05807fd7cd as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/f60da12f745f4439b58a4a05807fd7cd 2024-11-20T22:24:43,878 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/f60da12f745f4439b58a4a05807fd7cd, entries=200, sequenceid=322, filesize=14.4 K 2024-11-20T22:24:43,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/4ab55897be4b40548ca4feeb5e165f0f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/4ab55897be4b40548ca4feeb5e165f0f 2024-11-20T22:24:43,884 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/4ab55897be4b40548ca4feeb5e165f0f, entries=150, sequenceid=322, filesize=12.0 K 2024-11-20T22:24:43,884 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/4c47f3c3b67942269507a3043a4ff836 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/4c47f3c3b67942269507a3043a4ff836 2024-11-20T22:24:43,903 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/4c47f3c3b67942269507a3043a4ff836, entries=150, sequenceid=322, filesize=12.0 K 2024-11-20T22:24:43,904 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 1ae43dfbfefd3b112decface0ed50cc2 in 1001ms, sequenceid=322, 
compaction requested=true 2024-11-20T22:24:43,904 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:43,905 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:43,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:43,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:43,905 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:43,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:43,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:43,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:43,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:43,907 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:43,907 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/B is initiating minor compaction (all files) 2024-11-20T22:24:43,908 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/B in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:43,908 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/2ff3d8e6630a44cd9ffb362d27439b37, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/16e5f925fbc3437da3c8e74e501ec131, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/4ab55897be4b40548ca4feeb5e165f0f] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=36.7 K 2024-11-20T22:24:43,908 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39991 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:43,908 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/A is initiating minor compaction (all files) 2024-11-20T22:24:43,908 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/A in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:43,908 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1d9cd2ed5151436bb122bb66eadf9053, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1224d8187c25423ca3729bdd983411b9, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/f60da12f745f4439b58a4a05807fd7cd] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=39.1 K 2024-11-20T22:24:43,911 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ff3d8e6630a44cd9ffb362d27439b37, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732141480399 2024-11-20T22:24:43,911 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d9cd2ed5151436bb122bb66eadf9053, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732141480399 2024-11-20T22:24:43,913 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1224d8187c25423ca3729bdd983411b9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732141481540 2024-11-20T22:24:43,916 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 16e5f925fbc3437da3c8e74e501ec131, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732141481540 2024-11-20T22:24:43,917 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting f60da12f745f4439b58a4a05807fd7cd, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732141482242 2024-11-20T22:24:43,917 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ab55897be4b40548ca4feeb5e165f0f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732141482256 2024-11-20T22:24:43,949 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#A#compaction#249 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:43,949 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/ce3b11a1900641aa8c5b835becf0009b is 50, key is test_row_0/A:col10/1732141482277/Put/seqid=0 2024-11-20T22:24:43,957 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#B#compaction#250 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:43,957 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/e53d64df50484d5cb191dbbcbf3b6bb2 is 50, key is test_row_0/B:col10/1732141482277/Put/seqid=0 2024-11-20T22:24:43,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T22:24:43,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742122_1298 (size=13051) 2024-11-20T22:24:43,986 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:43,987 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T22:24:43,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:43,987 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 1ae43dfbfefd3b112decface0ed50cc2 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T22:24:43,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=A 2024-11-20T22:24:43,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:43,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=B 2024-11-20T22:24:43,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:43,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=C 2024-11-20T22:24:43,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:43,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742121_1297 (size=13051) 2024-11-20T22:24:43,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/35be16cba52c4627b6185c3a8ef3cbed is 50, key is test_row_0/A:col10/1732141482904/Put/seqid=0 2024-11-20T22:24:43,994 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/e53d64df50484d5cb191dbbcbf3b6bb2 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/e53d64df50484d5cb191dbbcbf3b6bb2 2024-11-20T22:24:43,998 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/ce3b11a1900641aa8c5b835becf0009b as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/ce3b11a1900641aa8c5b835becf0009b 2024-11-20T22:24:44,002 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/B of 1ae43dfbfefd3b112decface0ed50cc2 into e53d64df50484d5cb191dbbcbf3b6bb2(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:44,002 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:44,002 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/B, priority=13, startTime=1732141483905; duration=0sec 2024-11-20T22:24:44,003 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:44,003 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:B 2024-11-20T22:24:44,003 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:44,004 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:44,004 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/C is initiating minor compaction (all files) 2024-11-20T22:24:44,004 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/C in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:44,005 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/3435af535e21428e97820cf67ea2b6f0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/1219d20d7b544331a7dd42bb3c07cc6e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/4c47f3c3b67942269507a3043a4ff836] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=36.7 K 2024-11-20T22:24:44,005 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/A of 1ae43dfbfefd3b112decface0ed50cc2 into ce3b11a1900641aa8c5b835becf0009b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:44,005 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:44,005 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/A, priority=13, startTime=1732141483904; duration=0sec 2024-11-20T22:24:44,005 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:44,005 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:A 2024-11-20T22:24:44,005 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 3435af535e21428e97820cf67ea2b6f0, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732141480399 2024-11-20T22:24:44,005 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 1219d20d7b544331a7dd42bb3c07cc6e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732141481540 2024-11-20T22:24:44,006 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c47f3c3b67942269507a3043a4ff836, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732141482256 2024-11-20T22:24:44,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742123_1299 (size=12301) 2024-11-20T22:24:44,017 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/35be16cba52c4627b6185c3a8ef3cbed 2024-11-20T22:24:44,019 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#C#compaction#252 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:44,020 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/13f4f5ec647a431083f12cbdf3a05a0b is 50, key is test_row_0/C:col10/1732141482277/Put/seqid=0 2024-11-20T22:24:44,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/f8f07d7713c045558c30f28996741761 is 50, key is test_row_0/B:col10/1732141482904/Put/seqid=0 2024-11-20T22:24:44,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:44,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:44,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742124_1300 (size=13051) 2024-11-20T22:24:44,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742125_1301 (size=12301) 2024-11-20T22:24:44,052 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/f8f07d7713c045558c30f28996741761 2024-11-20T22:24:44,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/76f613be932848f1890cb33d22c0f3bc is 50, key is test_row_0/C:col10/1732141482904/Put/seqid=0 2024-11-20T22:24:44,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:44,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141544093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:44,099 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:44,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141544096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:44,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742126_1302 (size=12301) 2024-11-20T22:24:44,121 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/76f613be932848f1890cb33d22c0f3bc 2024-11-20T22:24:44,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/35be16cba52c4627b6185c3a8ef3cbed as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/35be16cba52c4627b6185c3a8ef3cbed 2024-11-20T22:24:44,132 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/35be16cba52c4627b6185c3a8ef3cbed, entries=150, sequenceid=330, filesize=12.0 K 2024-11-20T22:24:44,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/f8f07d7713c045558c30f28996741761 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/f8f07d7713c045558c30f28996741761 2024-11-20T22:24:44,138 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/f8f07d7713c045558c30f28996741761, entries=150, sequenceid=330, filesize=12.0 K 2024-11-20T22:24:44,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/76f613be932848f1890cb33d22c0f3bc as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/76f613be932848f1890cb33d22c0f3bc 2024-11-20T22:24:44,146 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/76f613be932848f1890cb33d22c0f3bc, entries=150, sequenceid=330, filesize=12.0 K 2024-11-20T22:24:44,147 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 1ae43dfbfefd3b112decface0ed50cc2 in 160ms, sequenceid=330, compaction requested=false 2024-11-20T22:24:44,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:44,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:44,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-20T22:24:44,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-20T22:24:44,150 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-20T22:24:44,150 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 470 msec 2024-11-20T22:24:44,154 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 478 msec 2024-11-20T22:24:44,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:44,204 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1ae43dfbfefd3b112decface0ed50cc2 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-20T22:24:44,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=A 2024-11-20T22:24:44,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:44,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=B 2024-11-20T22:24:44,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:44,204 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=C 2024-11-20T22:24:44,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:44,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/f8ed157014c241408d70cca34fe57f87 is 50, key is test_row_0/A:col10/1732141484088/Put/seqid=0 2024-11-20T22:24:44,217 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:44,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141544214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:44,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:44,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141544215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:44,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742127_1303 (size=14741) 2024-11-20T22:24:44,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T22:24:44,283 INFO [Thread-1082 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-20T22:24:44,284 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:44,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-20T22:24:44,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T22:24:44,285 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:44,286 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:44,286 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:44,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:44,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141544318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:44,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:44,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141544318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:44,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T22:24:44,437 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:44,438 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T22:24:44,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:44,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:44,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:44,438 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:44,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:44,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:44,447 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/13f4f5ec647a431083f12cbdf3a05a0b as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/13f4f5ec647a431083f12cbdf3a05a0b 2024-11-20T22:24:44,453 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/C of 1ae43dfbfefd3b112decface0ed50cc2 into 13f4f5ec647a431083f12cbdf3a05a0b(size=12.7 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:44,453 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:44,453 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/C, priority=13, startTime=1732141483905; duration=0sec 2024-11-20T22:24:44,453 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:44,453 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:C 2024-11-20T22:24:44,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:44,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141544520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:44,522 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:44,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141544521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:44,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:44,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39734 deadline: 1732141544570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:44,571 DEBUG [Thread-1076 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4158 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., hostname=6365a1e51efd,44631,1732141399950, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:44,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T22:24:44,590 DEBUG [RSProcedureDispatcher-pool-0 {}] 
master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:44,590 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T22:24:44,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:44,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:44,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:44,590 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:44,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:44,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:44,651 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/f8ed157014c241408d70cca34fe57f87 2024-11-20T22:24:44,659 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/e52f784ea1734662b5727679bf0ef363 is 50, key is test_row_0/B:col10/1732141484088/Put/seqid=0 2024-11-20T22:24:44,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742128_1304 (size=12301) 2024-11-20T22:24:44,745 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:44,745 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T22:24:44,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:44,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:44,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:44,746 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:44,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:44,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:44,830 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:44,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141544827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:44,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:44,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141544827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:44,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T22:24:44,898 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:44,899 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T22:24:44,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:44,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:44,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:44,901 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:44,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:44,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:45,067 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/e52f784ea1734662b5727679bf0ef363 2024-11-20T22:24:45,069 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:45,070 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T22:24:45,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:45,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:45,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:45,070 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:45,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:45,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:45,085 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/867b9249a8834fe0b0b0aca2367de70f is 50, key is test_row_0/C:col10/1732141484088/Put/seqid=0 2024-11-20T22:24:45,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742129_1305 (size=12301) 2024-11-20T22:24:45,117 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/867b9249a8834fe0b0b0aca2367de70f 2024-11-20T22:24:45,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/f8ed157014c241408d70cca34fe57f87 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/f8ed157014c241408d70cca34fe57f87 2024-11-20T22:24:45,157 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/f8ed157014c241408d70cca34fe57f87, entries=200, sequenceid=361, filesize=14.4 K 2024-11-20T22:24:45,160 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/e52f784ea1734662b5727679bf0ef363 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/e52f784ea1734662b5727679bf0ef363 2024-11-20T22:24:45,166 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/e52f784ea1734662b5727679bf0ef363, entries=150, sequenceid=361, filesize=12.0 K 2024-11-20T22:24:45,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/867b9249a8834fe0b0b0aca2367de70f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/867b9249a8834fe0b0b0aca2367de70f 2024-11-20T22:24:45,186 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/867b9249a8834fe0b0b0aca2367de70f, entries=150, sequenceid=361, filesize=12.0 K 2024-11-20T22:24:45,187 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 1ae43dfbfefd3b112decface0ed50cc2 in 984ms, sequenceid=361, 
compaction requested=true 2024-11-20T22:24:45,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:45,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:45,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:45,188 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:45,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:45,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:45,188 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:45,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:45,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:45,190 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40093 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:45,190 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/A is initiating minor compaction (all files) 2024-11-20T22:24:45,190 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/A in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
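Once the flush above leaves each store with three HFiles, ExploringCompactionPolicy selects all three eligible files for a minor compaction (the "Selecting compaction from 3 store files" and "has selected 3 files" entries). That selection is governed by the standard store-compaction settings; a minimal sketch of tuning them through a Configuration follows, with illustrative values rather than the ones this test actually uses:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CompactionTuningExample {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // files needed before a minor compaction is considered
    conf.setInt("hbase.hstore.compaction.max", 10);       // upper bound on files merged in one compaction
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used when scoring candidate file sets
    return conf;
  }
}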
2024-11-20T22:24:45,191 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/ce3b11a1900641aa8c5b835becf0009b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/35be16cba52c4627b6185c3a8ef3cbed, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/f8ed157014c241408d70cca34fe57f87] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=39.2 K 2024-11-20T22:24:45,191 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:45,191 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/B is initiating minor compaction (all files) 2024-11-20T22:24:45,191 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/B in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:45,191 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/e53d64df50484d5cb191dbbcbf3b6bb2, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/f8f07d7713c045558c30f28996741761, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/e52f784ea1734662b5727679bf0ef363] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=36.8 K 2024-11-20T22:24:45,192 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting e53d64df50484d5cb191dbbcbf3b6bb2, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732141482256 2024-11-20T22:24:45,193 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce3b11a1900641aa8c5b835becf0009b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732141482256 2024-11-20T22:24:45,193 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting f8f07d7713c045558c30f28996741761, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732141482904 2024-11-20T22:24:45,193 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35be16cba52c4627b6185c3a8ef3cbed, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732141482904 2024-11-20T22:24:45,193 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] 
compactions.Compactor(224): Compacting e52f784ea1734662b5727679bf0ef363, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1732141484088 2024-11-20T22:24:45,193 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting f8ed157014c241408d70cca34fe57f87, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1732141484088 2024-11-20T22:24:45,216 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#A#compaction#258 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:45,216 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/7ceb0a3637a946c296e3ecec1e638e7e is 50, key is test_row_0/A:col10/1732141484088/Put/seqid=0 2024-11-20T22:24:45,225 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#B#compaction#259 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:45,225 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/4d49c92b21d045d88841426281b20dbc is 50, key is test_row_0/B:col10/1732141484088/Put/seqid=0 2024-11-20T22:24:45,229 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:45,229 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T22:24:45,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:45,231 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 1ae43dfbfefd3b112decface0ed50cc2 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T22:24:45,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=A 2024-11-20T22:24:45,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:45,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=B 2024-11-20T22:24:45,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:45,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=C 2024-11-20T22:24:45,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:45,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742130_1306 (size=13153) 2024-11-20T22:24:45,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:45,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
as already flushing 2024-11-20T22:24:45,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/f607033e78454e299df9b1a3d970706b is 50, key is test_row_0/A:col10/1732141484211/Put/seqid=0 2024-11-20T22:24:45,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742131_1307 (size=13153) 2024-11-20T22:24:45,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742132_1308 (size=12301) 2024-11-20T22:24:45,269 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/f607033e78454e299df9b1a3d970706b 2024-11-20T22:24:45,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/28fb7fd5599c4e46990d99484e7c614c is 50, key is test_row_0/B:col10/1732141484211/Put/seqid=0 2024-11-20T22:24:45,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742133_1309 (size=12301) 2024-11-20T22:24:45,331 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141545327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:45,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141545331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:45,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141545339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:45,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141545339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:45,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T22:24:45,442 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141545440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:45,448 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141545446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:45,645 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/7ceb0a3637a946c296e3ecec1e638e7e as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/7ceb0a3637a946c296e3ecec1e638e7e 2024-11-20T22:24:45,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141545645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:45,654 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/4d49c92b21d045d88841426281b20dbc as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/4d49c92b21d045d88841426281b20dbc 2024-11-20T22:24:45,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141545649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:45,665 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/A of 1ae43dfbfefd3b112decface0ed50cc2 into 7ceb0a3637a946c296e3ecec1e638e7e(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:45,665 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:45,665 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/A, priority=13, startTime=1732141485187; duration=0sec 2024-11-20T22:24:45,665 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:45,665 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:A 2024-11-20T22:24:45,665 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:45,669 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:45,669 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/C is initiating minor compaction (all files) 2024-11-20T22:24:45,669 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/C in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
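The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are HRegion.checkResources rejecting puts while the region's memstore sits above its blocking limit, which is roughly hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier; the small 512 K figure presumably reflects the tiny flush size this test configures, and the writers keep retrying until a flush drains the memstore. A minimal sketch of the relevant keys, with illustrative values (not this test's settings and not necessarily any release's defaults):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class MemstorePressureExample {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Server side: puts fail fast with RegionTooBusyException once the region's
    // memstore reaches roughly flush.size * block.multiplier.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // Client side: busy regions are retried with a pause between attempts.
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 100); // milliseconds between retries
    return conf;
  }
}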
2024-11-20T22:24:45,669 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/13f4f5ec647a431083f12cbdf3a05a0b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/76f613be932848f1890cb33d22c0f3bc, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/867b9249a8834fe0b0b0aca2367de70f] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=36.8 K 2024-11-20T22:24:45,670 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13f4f5ec647a431083f12cbdf3a05a0b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732141482256 2024-11-20T22:24:45,670 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 76f613be932848f1890cb33d22c0f3bc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732141482904 2024-11-20T22:24:45,670 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/B of 1ae43dfbfefd3b112decface0ed50cc2 into 4d49c92b21d045d88841426281b20dbc(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:45,670 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:45,670 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/B, priority=13, startTime=1732141485188; duration=0sec 2024-11-20T22:24:45,670 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 867b9249a8834fe0b0b0aca2367de70f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1732141484088 2024-11-20T22:24:45,671 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:45,671 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:B 2024-11-20T22:24:45,689 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#C#compaction#262 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:45,689 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/1cb157449b6d4a168061e32ed2e90392 is 50, key is test_row_0/C:col10/1732141484088/Put/seqid=0 2024-11-20T22:24:45,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742134_1310 (size=13153) 2024-11-20T22:24:45,710 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/28fb7fd5599c4e46990d99484e7c614c 2024-11-20T22:24:45,722 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/1cb157449b6d4a168061e32ed2e90392 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/1cb157449b6d4a168061e32ed2e90392 2024-11-20T22:24:45,734 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/C of 1ae43dfbfefd3b112decface0ed50cc2 into 1cb157449b6d4a168061e32ed2e90392(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:45,734 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:45,734 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/C, priority=13, startTime=1732141485188; duration=0sec 2024-11-20T22:24:45,734 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:45,734 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:C 2024-11-20T22:24:45,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/828938e435f54fb5892f58052e5cf494 is 50, key is test_row_0/C:col10/1732141484211/Put/seqid=0 2024-11-20T22:24:45,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742135_1311 (size=12301) 2024-11-20T22:24:45,789 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/828938e435f54fb5892f58052e5cf494 2024-11-20T22:24:45,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/f607033e78454e299df9b1a3d970706b as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/f607033e78454e299df9b1a3d970706b 2024-11-20T22:24:45,811 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/f607033e78454e299df9b1a3d970706b, entries=150, sequenceid=370, filesize=12.0 K 2024-11-20T22:24:45,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/28fb7fd5599c4e46990d99484e7c614c as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/28fb7fd5599c4e46990d99484e7c614c 2024-11-20T22:24:45,818 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/28fb7fd5599c4e46990d99484e7c614c, entries=150, sequenceid=370, filesize=12.0 K 2024-11-20T22:24:45,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/828938e435f54fb5892f58052e5cf494 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/828938e435f54fb5892f58052e5cf494 2024-11-20T22:24:45,824 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/828938e435f54fb5892f58052e5cf494, entries=150, sequenceid=370, filesize=12.0 K 2024-11-20T22:24:45,825 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 1ae43dfbfefd3b112decface0ed50cc2 in 595ms, sequenceid=370, compaction requested=false 2024-11-20T22:24:45,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:45,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
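The "CompactingMemStore ... FLUSHING TO DISK" entries above show that the table's stores are backed by the in-memory-compaction (Accordion) memstore rather than the default one. A minimal sketch of creating a table with that memstore type through the descriptor builders; the table and family names are taken from the log, while the BASIC policy is an assumption, since the log does not state which policy the test selected:

import java.io.IOException;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class InMemoryCompactionExample {
  static void createTable(Admin admin) throws IOException {
    TableDescriptorBuilder table =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
    for (String family : new String[] {"A", "B", "C"}) {
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
          .setInMemoryCompaction(MemoryCompactionPolicy.BASIC) // assumption: BASIC; NONE/EAGER/ADAPTIVE also exist
          .build());
    }
    admin.createTable(table.build());
  }
}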
2024-11-20T22:24:45,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-20T22:24:45,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-20T22:24:45,828 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-20T22:24:45,828 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5410 sec 2024-11-20T22:24:45,830 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 1.5450 sec 2024-11-20T22:24:45,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:45,953 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1ae43dfbfefd3b112decface0ed50cc2 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-20T22:24:45,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=A 2024-11-20T22:24:45,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:45,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=B 2024-11-20T22:24:45,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:45,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=C 2024-11-20T22:24:45,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:45,960 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/b14bb02c97964c57b84d6eb247dbf246 is 50, key is test_row_0/A:col10/1732141485317/Put/seqid=0 2024-11-20T22:24:45,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141545967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:45,979 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141545975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:46,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742136_1312 (size=14741) 2024-11-20T22:24:46,010 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=402 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/b14bb02c97964c57b84d6eb247dbf246 2024-11-20T22:24:46,042 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/80c4f3517aff4fe9a3131dc7fc9b5b57 is 50, key is test_row_0/B:col10/1732141485317/Put/seqid=0 2024-11-20T22:24:46,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742137_1313 (size=12301) 2024-11-20T22:24:46,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141546079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:46,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141546090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:46,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141546284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:46,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141546303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:46,354 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39756 deadline: 1732141546351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:46,356 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39782 deadline: 1732141546352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:46,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T22:24:46,399 INFO [Thread-1082 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-20T22:24:46,400 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:46,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-11-20T22:24:46,402 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:46,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T22:24:46,402 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:46,402 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:46,471 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=402 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/80c4f3517aff4fe9a3131dc7fc9b5b57 2024-11-20T22:24:46,503 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/1bbe73bf25ef4adc81f50e70e08fc37e is 50, key is test_row_0/C:col10/1732141485317/Put/seqid=0 2024-11-20T22:24:46,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T22:24:46,536 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742138_1314 (size=12301) 2024-11-20T22:24:46,554 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:46,554 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-20T22:24:46,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:46,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:46,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:46,554 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:46,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:46,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:46,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39742 deadline: 1732141546590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:46,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39752 deadline: 1732141546612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:46,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T22:24:46,707 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:46,708 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-20T22:24:46,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:46,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:46,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:46,708 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:46,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:46,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:46,869 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:46,870 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-20T22:24:46,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:46,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. as already flushing 2024-11-20T22:24:46,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:46,870 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:46,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:46,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
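The repeated RegionTooBusyException entries above indicate that client Mutate calls are being rejected while the region memstore sits over its blocking limit (512.0 K in this run, presumably configured small for this test; in general the limit is the flush size times hbase.hregion.memstore.block.multiplier) and a flush of the region is already in progress. The HBase client normally retries such rejections internally; the minimal sketch below only illustrates the idea of an explicit application-level backoff. It is not the test tool's own code: the table name, row key and column family are copied from the log, while the retry count and backoff values are arbitrary assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row key and column family/qualifier taken from the log above.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 50;                 // arbitrary starting backoff for illustration
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                  // may be rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException busy) {
          if (attempt == 5) throw busy;    // give up after a few tries
          Thread.sleep(backoffMs);
          backoffMs *= 2;                  // simple exponential backoff
        }
      }
    }
  }
}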
2024-11-20T22:24:46,932 DEBUG [Thread-1091 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x15db087a to 127.0.0.1:51822 2024-11-20T22:24:46,932 DEBUG [Thread-1091 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:46,932 DEBUG [Thread-1085 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x78439bc6 to 127.0.0.1:51822 2024-11-20T22:24:46,932 DEBUG [Thread-1085 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:46,934 DEBUG [Thread-1083 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x31178bc2 to 127.0.0.1:51822 2024-11-20T22:24:46,934 DEBUG [Thread-1083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:46,939 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=402 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/1bbe73bf25ef4adc81f50e70e08fc37e 2024-11-20T22:24:46,945 DEBUG [Thread-1089 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6d039dc2 to 127.0.0.1:51822 2024-11-20T22:24:46,945 DEBUG [Thread-1089 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:46,945 DEBUG [Thread-1087 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5a2545d0 to 127.0.0.1:51822 2024-11-20T22:24:46,945 DEBUG [Thread-1087 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:46,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/b14bb02c97964c57b84d6eb247dbf246 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/b14bb02c97964c57b84d6eb247dbf246 2024-11-20T22:24:46,960 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/b14bb02c97964c57b84d6eb247dbf246, entries=200, sequenceid=402, filesize=14.4 K 2024-11-20T22:24:46,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/80c4f3517aff4fe9a3131dc7fc9b5b57 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/80c4f3517aff4fe9a3131dc7fc9b5b57 2024-11-20T22:24:46,970 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/80c4f3517aff4fe9a3131dc7fc9b5b57, entries=150, sequenceid=402, filesize=12.0 K 2024-11-20T22:24:46,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/1bbe73bf25ef4adc81f50e70e08fc37e as 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/1bbe73bf25ef4adc81f50e70e08fc37e 2024-11-20T22:24:46,980 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/1bbe73bf25ef4adc81f50e70e08fc37e, entries=150, sequenceid=402, filesize=12.0 K 2024-11-20T22:24:46,984 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 1ae43dfbfefd3b112decface0ed50cc2 in 1031ms, sequenceid=402, compaction requested=true 2024-11-20T22:24:46,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:46,984 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:46,985 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:46,986 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/A is initiating minor compaction (all files) 2024-11-20T22:24:46,986 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/A in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:46,986 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/7ceb0a3637a946c296e3ecec1e638e7e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/f607033e78454e299df9b1a3d970706b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/b14bb02c97964c57b84d6eb247dbf246] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=39.3 K 2024-11-20T22:24:46,986 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ceb0a3637a946c296e3ecec1e638e7e, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1732141484088 2024-11-20T22:24:46,987 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting f607033e78454e299df9b1a3d970706b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732141484211 2024-11-20T22:24:46,987 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting b14bb02c97964c57b84d6eb247dbf246, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=402, earliestPutTs=1732141485313 2024-11-20T22:24:46,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:46,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:46,995 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:46,996 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:46,996 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/B is initiating minor compaction (all files) 2024-11-20T22:24:46,996 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/B in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:46,997 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/4d49c92b21d045d88841426281b20dbc, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/28fb7fd5599c4e46990d99484e7c614c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/80c4f3517aff4fe9a3131dc7fc9b5b57] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=36.9 K 2024-11-20T22:24:46,997 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d49c92b21d045d88841426281b20dbc, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1732141484088 2024-11-20T22:24:46,997 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 28fb7fd5599c4e46990d99484e7c614c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732141484211 2024-11-20T22:24:46,998 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 80c4f3517aff4fe9a3131dc7fc9b5b57, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=402, earliestPutTs=1732141485317 2024-11-20T22:24:47,001 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#A#compaction#267 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:47,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T22:24:47,011 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/8d0ac7736b344d45b36166fdc4d61b6c is 50, key is test_row_0/A:col10/1732141485317/Put/seqid=0 2024-11-20T22:24:47,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:47,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:47,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ae43dfbfefd3b112decface0ed50cc2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:47,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:47,022 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#B#compaction#268 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:47,023 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:47,023 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/dd6b670b1699448abf38be2bce9c8460 is 50, key is test_row_0/B:col10/1732141485317/Put/seqid=0 2024-11-20T22:24:47,027 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-20T22:24:47,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:47,027 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 1ae43dfbfefd3b112decface0ed50cc2 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T22:24:47,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=A 2024-11-20T22:24:47,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:47,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=B 2024-11-20T22:24:47,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:47,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=C 2024-11-20T22:24:47,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:47,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742139_1315 (size=13255) 2024-11-20T22:24:47,068 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/8d0ac7736b344d45b36166fdc4d61b6c as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/8d0ac7736b344d45b36166fdc4d61b6c 2024-11-20T22:24:47,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/6c8a77597203453eb27e349fe23aaeb4 is 50, key is test_row_0/A:col10/1732141485966/Put/seqid=0 2024-11-20T22:24:47,081 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/A of 1ae43dfbfefd3b112decface0ed50cc2 into 8d0ac7736b344d45b36166fdc4d61b6c(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:47,081 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:47,081 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/A, priority=13, startTime=1732141486984; duration=0sec 2024-11-20T22:24:47,081 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:47,081 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:A 2024-11-20T22:24:47,081 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:47,086 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:47,086 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 1ae43dfbfefd3b112decface0ed50cc2/C is initiating minor compaction (all files) 2024-11-20T22:24:47,086 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1ae43dfbfefd3b112decface0ed50cc2/C in TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:47,087 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/1cb157449b6d4a168061e32ed2e90392, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/828938e435f54fb5892f58052e5cf494, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/1bbe73bf25ef4adc81f50e70e08fc37e] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp, totalSize=36.9 K 2024-11-20T22:24:47,088 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1cb157449b6d4a168061e32ed2e90392, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1732141484088 2024-11-20T22:24:47,090 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 828938e435f54fb5892f58052e5cf494, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732141484211 2024-11-20T22:24:47,090 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1bbe73bf25ef4adc81f50e70e08fc37e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=402, earliestPutTs=1732141485317 2024-11-20T22:24:47,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34119 is added to blk_1073742140_1316 (size=13255) 2024-11-20T22:24:47,109 DEBUG [Thread-1078 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1a91dc80 to 127.0.0.1:51822 2024-11-20T22:24:47,109 DEBUG [Thread-1078 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:47,110 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/dd6b670b1699448abf38be2bce9c8460 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/dd6b670b1699448abf38be2bce9c8460 2024-11-20T22:24:47,111 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ae43dfbfefd3b112decface0ed50cc2#C#compaction#270 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:47,111 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/ac4bea7e8906448eaebf4f3951d95b10 is 50, key is test_row_0/C:col10/1732141485317/Put/seqid=0 2024-11-20T22:24:47,125 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/B of 1ae43dfbfefd3b112decface0ed50cc2 into dd6b670b1699448abf38be2bce9c8460(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
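The compaction entries above show ExploringCompactionPolicy selecting all three eligible store files per family and rewriting them into a single ~12.9 K HFile. Here the compactions are queued by the memstore flusher, but a compaction can also be requested through the Admin API; the sketch below is only an illustration under the assumption of a reachable cluster, not code from this test, with the table and family names copied from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompactionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Ask for a minor compaction of one column family; the server-side
      // policy (ExploringCompactionPolicy in the log) chooses the files.
      admin.compact(table, Bytes.toBytes("A"));
      // Or rewrite every store file in the table:
      admin.majorCompact(table);
    }
  }
}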
2024-11-20T22:24:47,125 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:47,125 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/B, priority=13, startTime=1732141486995; duration=0sec 2024-11-20T22:24:47,125 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:47,125 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:B 2024-11-20T22:24:47,128 DEBUG [Thread-1074 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x05e0e280 to 127.0.0.1:51822 2024-11-20T22:24:47,129 DEBUG [Thread-1074 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:47,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742141_1317 (size=12301) 2024-11-20T22:24:47,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742142_1318 (size=13255) 2024-11-20T22:24:47,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T22:24:47,534 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/6c8a77597203453eb27e349fe23aaeb4 2024-11-20T22:24:47,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/7be1259be7c7400b9cf13de8a1a91e6a is 50, key is test_row_0/B:col10/1732141485966/Put/seqid=0 2024-11-20T22:24:47,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742143_1319 (size=12301) 2024-11-20T22:24:47,570 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/7be1259be7c7400b9cf13de8a1a91e6a 2024-11-20T22:24:47,570 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/ac4bea7e8906448eaebf4f3951d95b10 as 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/ac4bea7e8906448eaebf4f3951d95b10 2024-11-20T22:24:47,576 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1ae43dfbfefd3b112decface0ed50cc2/C of 1ae43dfbfefd3b112decface0ed50cc2 into ac4bea7e8906448eaebf4f3951d95b10(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:47,576 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:47,576 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2., storeName=1ae43dfbfefd3b112decface0ed50cc2/C, priority=13, startTime=1732141487011; duration=0sec 2024-11-20T22:24:47,577 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:47,577 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ae43dfbfefd3b112decface0ed50cc2:C 2024-11-20T22:24:47,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/53d11a3110d3494fa223410d2815b8f8 is 50, key is test_row_0/C:col10/1732141485966/Put/seqid=0 2024-11-20T22:24:47,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742144_1320 (size=12301) 2024-11-20T22:24:48,023 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/53d11a3110d3494fa223410d2815b8f8 2024-11-20T22:24:48,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/6c8a77597203453eb27e349fe23aaeb4 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/6c8a77597203453eb27e349fe23aaeb4 2024-11-20T22:24:48,044 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/6c8a77597203453eb27e349fe23aaeb4, entries=150, sequenceid=409, filesize=12.0 K 2024-11-20T22:24:48,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/7be1259be7c7400b9cf13de8a1a91e6a as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/7be1259be7c7400b9cf13de8a1a91e6a 2024-11-20T22:24:48,049 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/7be1259be7c7400b9cf13de8a1a91e6a, entries=150, sequenceid=409, filesize=12.0 K 2024-11-20T22:24:48,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/53d11a3110d3494fa223410d2815b8f8 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/53d11a3110d3494fa223410d2815b8f8 2024-11-20T22:24:48,069 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/53d11a3110d3494fa223410d2815b8f8, entries=150, sequenceid=409, filesize=12.0 K 2024-11-20T22:24:48,070 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=13.42 KB/13740 for 1ae43dfbfefd3b112decface0ed50cc2 in 1042ms, sequenceid=409, compaction requested=false 2024-11-20T22:24:48,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:48,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
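The pid=87/pid=88 entries around this point are the server side of an admin flush request: the master stores a FlushTableProcedure, fans out one FlushRegionProcedure per region, and keeps retrying the region callable while the region reports it is already flushing. A minimal client-side counterpart, assuming the standard Admin API and a reachable cluster (this is an illustration, not the AcidGuaranteesTestTool source):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Synchronous flush request; on the master this becomes a
      // FlushTableProcedure with one FlushRegionProcedure per region,
      // matching the pid=87/pid=88 entries in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}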
2024-11-20T22:24:48,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-11-20T22:24:48,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-11-20T22:24:48,072 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-20T22:24:48,072 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6690 sec 2024-11-20T22:24:48,073 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.6720 sec 2024-11-20T22:24:48,086 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T22:24:48,364 DEBUG [Thread-1080 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6a874cc0 to 127.0.0.1:51822 2024-11-20T22:24:48,364 DEBUG [Thread-1080 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:48,364 DEBUG [Thread-1072 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22a568ce to 127.0.0.1:51822 2024-11-20T22:24:48,364 DEBUG [Thread-1072 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:48,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T22:24:48,519 INFO [Thread-1082 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-20T22:24:48,626 DEBUG [Thread-1076 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x473477dd to 127.0.0.1:51822 2024-11-20T22:24:48,626 DEBUG [Thread-1076 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:48,627 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T22:24:48,627 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 77 2024-11-20T22:24:48,627 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 59 2024-11-20T22:24:48,627 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57 2024-11-20T22:24:48,627 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 49 2024-11-20T22:24:48,627 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 84 2024-11-20T22:24:48,627 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T22:24:48,627 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2786 2024-11-20T22:24:48,627 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2776 2024-11-20T22:24:48,627 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2777 2024-11-20T22:24:48,627 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2777 2024-11-20T22:24:48,627 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2750 2024-11-20T22:24:48,627 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T22:24:48,627 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T22:24:48,627 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x00faa31c to 127.0.0.1:51822 2024-11-20T22:24:48,627 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:48,629 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T22:24:48,630 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T22:24:48,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:48,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T22:24:48,633 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141488633"}]},"ts":"1732141488633"} 2024-11-20T22:24:48,637 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T22:24:48,649 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T22:24:48,651 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T22:24:48,653 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1ae43dfbfefd3b112decface0ed50cc2, UNASSIGN}] 2024-11-20T22:24:48,654 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1ae43dfbfefd3b112decface0ed50cc2, UNASSIGN 2024-11-20T22:24:48,655 INFO [PEWorker-4 {}] 
assignment.RegionStateStore(202): pid=91 updating hbase:meta row=1ae43dfbfefd3b112decface0ed50cc2, regionState=CLOSING, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:48,656 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T22:24:48,656 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; CloseRegionProcedure 1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950}] 2024-11-20T22:24:48,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T22:24:48,809 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:48,810 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(124): Close 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:48,810 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T22:24:48,810 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1681): Closing 1ae43dfbfefd3b112decface0ed50cc2, disabling compactions & flushes 2024-11-20T22:24:48,810 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:48,810 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 2024-11-20T22:24:48,810 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. after waiting 0 ms 2024-11-20T22:24:48,810 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
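Editor's note: the DisableTableProcedure (pid=89) and its CloseTableRegionsProcedure / TransitRegionStateProcedure / CloseRegionProcedure children above are all driven by a single client call, and the repeated "Checking to see if procedure is done pid=89" entries are the client polling for completion. A minimal sketch of that call, with the connection setup again treated as illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                if (admin.isTableEnabled(table)) {
                    // Returns once every region is closed and the table state is DISABLED in hbase:meta.
                    admin.disableTable(table);
                }
            }
        }
    }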
2024-11-20T22:24:48,810 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(2837): Flushing 1ae43dfbfefd3b112decface0ed50cc2 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T22:24:48,810 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=A 2024-11-20T22:24:48,810 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:48,811 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=B 2024-11-20T22:24:48,811 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:48,811 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1ae43dfbfefd3b112decface0ed50cc2, store=C 2024-11-20T22:24:48,811 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:48,818 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/bab27ed1532c4ccf9fcac3b5057bdda7 is 50, key is test_row_0/A:col10/1732141487128/Put/seqid=0 2024-11-20T22:24:48,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742145_1321 (size=12301) 2024-11-20T22:24:48,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T22:24:49,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T22:24:49,243 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/bab27ed1532c4ccf9fcac3b5057bdda7 2024-11-20T22:24:49,252 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/fb41ba5401424c86abeab1edb561e06a is 50, key is test_row_0/B:col10/1732141487128/Put/seqid=0 2024-11-20T22:24:49,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742146_1322 (size=12301) 2024-11-20T22:24:49,261 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 
{event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/fb41ba5401424c86abeab1edb561e06a 2024-11-20T22:24:49,280 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/801ef31ae56e43dea29b8ffb7b2d3a85 is 50, key is test_row_0/C:col10/1732141487128/Put/seqid=0 2024-11-20T22:24:49,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742147_1323 (size=12301) 2024-11-20T22:24:49,302 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/801ef31ae56e43dea29b8ffb7b2d3a85 2024-11-20T22:24:49,317 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/A/bab27ed1532c4ccf9fcac3b5057bdda7 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/bab27ed1532c4ccf9fcac3b5057bdda7 2024-11-20T22:24:49,325 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/bab27ed1532c4ccf9fcac3b5057bdda7, entries=150, sequenceid=420, filesize=12.0 K 2024-11-20T22:24:49,327 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/B/fb41ba5401424c86abeab1edb561e06a as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/fb41ba5401424c86abeab1edb561e06a 2024-11-20T22:24:49,335 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/fb41ba5401424c86abeab1edb561e06a, entries=150, sequenceid=420, filesize=12.0 K 2024-11-20T22:24:49,336 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/.tmp/C/801ef31ae56e43dea29b8ffb7b2d3a85 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/801ef31ae56e43dea29b8ffb7b2d3a85 2024-11-20T22:24:49,343 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/801ef31ae56e43dea29b8ffb7b2d3a85, entries=150, sequenceid=420, filesize=12.0 K 2024-11-20T22:24:49,344 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 1ae43dfbfefd3b112decface0ed50cc2 in 534ms, sequenceid=420, compaction requested=true 2024-11-20T22:24:49,348 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/0ddb15b6d47341c088abd296a3ff57dc, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/55106ddf21f147cc9bb9fc2d165fdb4c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/96c352d0948c4a51b607cc7650d46e0e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/a079e6617a1146ca8b15c6f6f93a1e3d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1dd5bd04440a4d5d868e3da06039766a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/e7e05530614b49c3bd50d4dbfe1b22d8, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/b571feffea6a4576a67e28d408af0eb9, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/13242446c34d4bfb926ae51570ca65e5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1379ffa48d1044aa85fcd2f834096987, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/fd1cc53d20c74be3b07e704dd535abf6, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/3358eda11ac34273894878aa1e6db6be, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/13b11c0fb3b24ebaba32ab8b7f01fcef, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/0a07d1359c144e61aa260b491521685e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/92a66bcc40c74a37b33eaf13591dbde9, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/40aa86b85a8d448d96729d4dc30dd354, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/bcb98197c5a0491485812ba7cf82e614, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/01e22282941b47dfa9832a0475b43ae2, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/627a10f73530414a9ea40dfec773c064, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1d9cd2ed5151436bb122bb66eadf9053, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/6a63b67d06994797a4298190dc81351c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1224d8187c25423ca3729bdd983411b9, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/f60da12f745f4439b58a4a05807fd7cd, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/ce3b11a1900641aa8c5b835becf0009b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/35be16cba52c4627b6185c3a8ef3cbed, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/f8ed157014c241408d70cca34fe57f87, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/7ceb0a3637a946c296e3ecec1e638e7e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/f607033e78454e299df9b1a3d970706b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/b14bb02c97964c57b84d6eb247dbf246] to archive 2024-11-20T22:24:49,349 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
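Editor's note: the HFileArchiver entries that follow move each compacted store file from the region's data directory to a mirrored location under archive/, preserving the table/region/family layout. The sketch below only reproduces that data/... to archive/data/... naming convention as it appears in these log lines; it is not the HFileArchiver implementation, and the helper name and root-dir literal are taken from the log purely for illustration.

    import org.apache.hadoop.fs.Path;

    public class ArchivePathSketch {
        /** Maps .../data/<ns>/<table>/<region>/<family>/<file> to the mirrored .../archive/data/... location. */
        static Path toArchivePath(Path rootDir, Path storeFile) {
            String root = rootDir.toUri().getPath();
            String file = storeFile.toUri().getPath();
            if (!file.startsWith(root)) {
                throw new IllegalArgumentException("store file not under root dir");
            }
            // Strip the root prefix, then re-anchor the same relative layout under archive/.
            String suffix = file.substring(root.length()).replaceFirst("^/+", "");
            return new Path(new Path(rootDir, "archive"), suffix);
        }

        public static void main(String[] args) {
            Path root = new Path(
                "hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a");
            Path storeFile = new Path(root,
                "data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/0ddb15b6d47341c088abd296a3ff57dc");
            // Prints the archive location matching the "Archived from FileableStoreFile ... to ..." lines below.
            System.out.println(toArchivePath(root, storeFile));
        }
    }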
2024-11-20T22:24:49,352 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/0ddb15b6d47341c088abd296a3ff57dc to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/0ddb15b6d47341c088abd296a3ff57dc 2024-11-20T22:24:49,354 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/55106ddf21f147cc9bb9fc2d165fdb4c to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/55106ddf21f147cc9bb9fc2d165fdb4c 2024-11-20T22:24:49,356 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/96c352d0948c4a51b607cc7650d46e0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/96c352d0948c4a51b607cc7650d46e0e 2024-11-20T22:24:49,357 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/a079e6617a1146ca8b15c6f6f93a1e3d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/a079e6617a1146ca8b15c6f6f93a1e3d 2024-11-20T22:24:49,359 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1dd5bd04440a4d5d868e3da06039766a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1dd5bd04440a4d5d868e3da06039766a 2024-11-20T22:24:49,362 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/e7e05530614b49c3bd50d4dbfe1b22d8 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/e7e05530614b49c3bd50d4dbfe1b22d8 2024-11-20T22:24:49,364 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/b571feffea6a4576a67e28d408af0eb9 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/b571feffea6a4576a67e28d408af0eb9 2024-11-20T22:24:49,386 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/13242446c34d4bfb926ae51570ca65e5 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/13242446c34d4bfb926ae51570ca65e5 2024-11-20T22:24:49,388 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1379ffa48d1044aa85fcd2f834096987 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1379ffa48d1044aa85fcd2f834096987 2024-11-20T22:24:49,390 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/fd1cc53d20c74be3b07e704dd535abf6 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/fd1cc53d20c74be3b07e704dd535abf6 2024-11-20T22:24:49,396 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/3358eda11ac34273894878aa1e6db6be to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/3358eda11ac34273894878aa1e6db6be 2024-11-20T22:24:49,397 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/13b11c0fb3b24ebaba32ab8b7f01fcef to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/13b11c0fb3b24ebaba32ab8b7f01fcef 2024-11-20T22:24:49,399 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/0a07d1359c144e61aa260b491521685e to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/0a07d1359c144e61aa260b491521685e 2024-11-20T22:24:49,401 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/92a66bcc40c74a37b33eaf13591dbde9 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/92a66bcc40c74a37b33eaf13591dbde9 2024-11-20T22:24:49,410 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/40aa86b85a8d448d96729d4dc30dd354 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/40aa86b85a8d448d96729d4dc30dd354 2024-11-20T22:24:49,413 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/bcb98197c5a0491485812ba7cf82e614 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/bcb98197c5a0491485812ba7cf82e614 2024-11-20T22:24:49,415 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/01e22282941b47dfa9832a0475b43ae2 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/01e22282941b47dfa9832a0475b43ae2 2024-11-20T22:24:49,417 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/627a10f73530414a9ea40dfec773c064 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/627a10f73530414a9ea40dfec773c064 2024-11-20T22:24:49,418 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1d9cd2ed5151436bb122bb66eadf9053 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1d9cd2ed5151436bb122bb66eadf9053 2024-11-20T22:24:49,419 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/6a63b67d06994797a4298190dc81351c to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/6a63b67d06994797a4298190dc81351c 2024-11-20T22:24:49,423 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1224d8187c25423ca3729bdd983411b9 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/1224d8187c25423ca3729bdd983411b9 2024-11-20T22:24:49,425 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/f60da12f745f4439b58a4a05807fd7cd to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/f60da12f745f4439b58a4a05807fd7cd 2024-11-20T22:24:49,427 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/ce3b11a1900641aa8c5b835becf0009b to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/ce3b11a1900641aa8c5b835becf0009b 2024-11-20T22:24:49,432 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/35be16cba52c4627b6185c3a8ef3cbed to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/35be16cba52c4627b6185c3a8ef3cbed 2024-11-20T22:24:49,434 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/f8ed157014c241408d70cca34fe57f87 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/f8ed157014c241408d70cca34fe57f87 2024-11-20T22:24:49,436 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/7ceb0a3637a946c296e3ecec1e638e7e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/7ceb0a3637a946c296e3ecec1e638e7e 2024-11-20T22:24:49,440 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/f607033e78454e299df9b1a3d970706b to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/f607033e78454e299df9b1a3d970706b 2024-11-20T22:24:49,443 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/b14bb02c97964c57b84d6eb247dbf246 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/b14bb02c97964c57b84d6eb247dbf246 2024-11-20T22:24:49,455 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/f9bca11bc79d4c6586ca722c5b75dbd4, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/623514d9bf6f4305bf1d16a5836d7b3e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/3765cec605ba402895eb212045a72b8e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/c39d965c15d340f381bd087ffb2c161e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/32fe3af0c6614c3cbf67f8c4e4c68cff, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/65168271972f4df1b2f2fd6c55b4b899, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/64dcf2db4d0e4723a584da75969ed301, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/1a1c4548a5814eb6858649fc77aa61f6, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/9511ff5918e84d448772ae995a45fc5c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/44c98c57b3bc4c47b69cea1a6bba284e, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/2636b8331b09416da7c32dcb74968adc, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/06f7ffb79daf474f85f91fdec4e045c0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/b1c93c376d254d83920437c8c5a1955b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/07d0206eec26492ea402976370c5c759, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/c8b252f1743747688317931515e48093, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/d31692a3ea01408c9d6b7ab39694d9bf, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/e25bc8a7e3724d18b96ad6fbe9bc1a50, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/c0567ce1515943f79bafaf6870594c81, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/2ff3d8e6630a44cd9ffb362d27439b37, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/1d1b9b09080b40b8b6b8d5fc0c8ea687, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/16e5f925fbc3437da3c8e74e501ec131, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/e53d64df50484d5cb191dbbcbf3b6bb2, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/4ab55897be4b40548ca4feeb5e165f0f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/f8f07d7713c045558c30f28996741761, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/4d49c92b21d045d88841426281b20dbc, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/e52f784ea1734662b5727679bf0ef363, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/28fb7fd5599c4e46990d99484e7c614c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/80c4f3517aff4fe9a3131dc7fc9b5b57] to archive 2024-11-20T22:24:49,456 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T22:24:49,460 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/f9bca11bc79d4c6586ca722c5b75dbd4 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/f9bca11bc79d4c6586ca722c5b75dbd4 2024-11-20T22:24:49,461 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/623514d9bf6f4305bf1d16a5836d7b3e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/623514d9bf6f4305bf1d16a5836d7b3e 2024-11-20T22:24:49,463 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/3765cec605ba402895eb212045a72b8e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/3765cec605ba402895eb212045a72b8e 2024-11-20T22:24:49,473 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/c39d965c15d340f381bd087ffb2c161e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/c39d965c15d340f381bd087ffb2c161e 2024-11-20T22:24:49,475 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/32fe3af0c6614c3cbf67f8c4e4c68cff to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/32fe3af0c6614c3cbf67f8c4e4c68cff 2024-11-20T22:24:49,487 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/65168271972f4df1b2f2fd6c55b4b899 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/65168271972f4df1b2f2fd6c55b4b899 2024-11-20T22:24:49,488 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/64dcf2db4d0e4723a584da75969ed301 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/64dcf2db4d0e4723a584da75969ed301 2024-11-20T22:24:49,490 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/1a1c4548a5814eb6858649fc77aa61f6 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/1a1c4548a5814eb6858649fc77aa61f6 2024-11-20T22:24:49,507 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/9511ff5918e84d448772ae995a45fc5c to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/9511ff5918e84d448772ae995a45fc5c 2024-11-20T22:24:49,509 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/44c98c57b3bc4c47b69cea1a6bba284e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/44c98c57b3bc4c47b69cea1a6bba284e 2024-11-20T22:24:49,510 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/2636b8331b09416da7c32dcb74968adc to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/2636b8331b09416da7c32dcb74968adc 2024-11-20T22:24:49,512 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/06f7ffb79daf474f85f91fdec4e045c0 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/06f7ffb79daf474f85f91fdec4e045c0 2024-11-20T22:24:49,513 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/b1c93c376d254d83920437c8c5a1955b to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/b1c93c376d254d83920437c8c5a1955b 2024-11-20T22:24:49,515 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/07d0206eec26492ea402976370c5c759 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/07d0206eec26492ea402976370c5c759 2024-11-20T22:24:49,516 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/c8b252f1743747688317931515e48093 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/c8b252f1743747688317931515e48093 2024-11-20T22:24:49,517 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/d31692a3ea01408c9d6b7ab39694d9bf to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/d31692a3ea01408c9d6b7ab39694d9bf 2024-11-20T22:24:49,518 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/e25bc8a7e3724d18b96ad6fbe9bc1a50 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/e25bc8a7e3724d18b96ad6fbe9bc1a50 2024-11-20T22:24:49,519 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/c0567ce1515943f79bafaf6870594c81 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/c0567ce1515943f79bafaf6870594c81 2024-11-20T22:24:49,530 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/2ff3d8e6630a44cd9ffb362d27439b37 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/2ff3d8e6630a44cd9ffb362d27439b37 2024-11-20T22:24:49,531 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/1d1b9b09080b40b8b6b8d5fc0c8ea687 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/1d1b9b09080b40b8b6b8d5fc0c8ea687 2024-11-20T22:24:49,534 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/16e5f925fbc3437da3c8e74e501ec131 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/16e5f925fbc3437da3c8e74e501ec131 2024-11-20T22:24:49,541 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/e53d64df50484d5cb191dbbcbf3b6bb2 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/e53d64df50484d5cb191dbbcbf3b6bb2 2024-11-20T22:24:49,542 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/4ab55897be4b40548ca4feeb5e165f0f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/4ab55897be4b40548ca4feeb5e165f0f 2024-11-20T22:24:49,543 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/f8f07d7713c045558c30f28996741761 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/f8f07d7713c045558c30f28996741761 2024-11-20T22:24:49,544 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/4d49c92b21d045d88841426281b20dbc to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/4d49c92b21d045d88841426281b20dbc 2024-11-20T22:24:49,545 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/e52f784ea1734662b5727679bf0ef363 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/e52f784ea1734662b5727679bf0ef363 2024-11-20T22:24:49,546 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/28fb7fd5599c4e46990d99484e7c614c to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/28fb7fd5599c4e46990d99484e7c614c 2024-11-20T22:24:49,546 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/80c4f3517aff4fe9a3131dc7fc9b5b57 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/80c4f3517aff4fe9a3131dc7fc9b5b57 2024-11-20T22:24:49,547 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/83f6c1b5eace43abb193677c801a3f56, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/9c25b9943a7646f99669eb8a65c2fbed, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/16a6a034a67f49668a438e14de202893, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/e2386bdcfc3e40d697592e3b7946c8a3, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/a25aba0e1bcc4938954431e2fe903500, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/5e7f49a5e0de4a6a87c9efff8c99c249, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/da15edadbdc74415932d1478e4a34732, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/67203502698f41e18aab95acf3967314, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/b5f11d593c1b4ee995faa6f7e244d74a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/2a1b76bd4abc49388605aeac2df11bad, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/bf81eb17af844f378aba787de5f269e6, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/0e2705a549834475bf51a64c6e86640d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/ded08eb651e24aef87ce033e2965ec44, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/b7b5b2610bc246ce938cd93f67a7be9d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/0519d94d37f649d5864287dac308d2d5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/99ca2cab4bf747c3843e548380b5ba5a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/731a635790464fc883c130b5222af593, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/695dd35a7c424a49bdce9ce2c3fa679f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/3435af535e21428e97820cf67ea2b6f0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/4dde0fb747e24cad9ca19a3bc2b7a45d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/1219d20d7b544331a7dd42bb3c07cc6e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/13f4f5ec647a431083f12cbdf3a05a0b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/4c47f3c3b67942269507a3043a4ff836, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/76f613be932848f1890cb33d22c0f3bc, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/1cb157449b6d4a168061e32ed2e90392, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/867b9249a8834fe0b0b0aca2367de70f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/828938e435f54fb5892f58052e5cf494, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/1bbe73bf25ef4adc81f50e70e08fc37e] to archive 2024-11-20T22:24:49,548 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
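
[Editor's note, not test output] The HFileArchiver entries above show each compacted store file for family C being moved from the region's data directory to the mirrored location under archive/. A minimal sketch of that path mapping, assuming an illustrative helper class (ArchivePathSketch is not the real HFileArchiver code, which lives in org.apache.hadoop.hbase.backup):

    import org.apache.hadoop.fs.Path;

    // Illustrative only: mirrors the data/ -> archive/data/ re-rooting visible in the
    // log lines above; the actual logic is in HFileArchiver / HFileArchiveUtil.
    public final class ArchivePathSketch {
      // rootDir:   e.g. hdfs://localhost:41121/user/jenkins/test-data/82325a6b-...
      // storeFile: rootDir + /data/<namespace>/<table>/<region>/<cf>/<hfile>
      static Path toArchivePath(Path rootDir, Path storeFile) {
        String root = rootDir.toString();
        String full = storeFile.toString();
        if (!full.startsWith(root + "/data/")) {
          throw new IllegalArgumentException("store file not under " + root + "/data/");
        }
        // Keep the namespace/table/region/cf/file suffix, re-rooted under archive/data/.
        String suffix = full.substring((root + "/data/").length());
        return new Path(root + "/archive/data/" + suffix);
      }
    }
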
2024-11-20T22:24:49,550 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/83f6c1b5eace43abb193677c801a3f56 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/83f6c1b5eace43abb193677c801a3f56 2024-11-20T22:24:49,550 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/9c25b9943a7646f99669eb8a65c2fbed to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/9c25b9943a7646f99669eb8a65c2fbed 2024-11-20T22:24:49,551 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/16a6a034a67f49668a438e14de202893 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/16a6a034a67f49668a438e14de202893 2024-11-20T22:24:49,552 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/e2386bdcfc3e40d697592e3b7946c8a3 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/e2386bdcfc3e40d697592e3b7946c8a3 2024-11-20T22:24:49,553 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/a25aba0e1bcc4938954431e2fe903500 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/a25aba0e1bcc4938954431e2fe903500 2024-11-20T22:24:49,558 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/5e7f49a5e0de4a6a87c9efff8c99c249 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/5e7f49a5e0de4a6a87c9efff8c99c249 2024-11-20T22:24:49,559 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/da15edadbdc74415932d1478e4a34732 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/da15edadbdc74415932d1478e4a34732 2024-11-20T22:24:49,560 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/67203502698f41e18aab95acf3967314 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/67203502698f41e18aab95acf3967314 2024-11-20T22:24:49,561 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/b5f11d593c1b4ee995faa6f7e244d74a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/b5f11d593c1b4ee995faa6f7e244d74a 2024-11-20T22:24:49,562 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/2a1b76bd4abc49388605aeac2df11bad to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/2a1b76bd4abc49388605aeac2df11bad 2024-11-20T22:24:49,563 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/bf81eb17af844f378aba787de5f269e6 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/bf81eb17af844f378aba787de5f269e6 2024-11-20T22:24:49,563 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/0e2705a549834475bf51a64c6e86640d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/0e2705a549834475bf51a64c6e86640d 2024-11-20T22:24:49,564 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/ded08eb651e24aef87ce033e2965ec44 to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/ded08eb651e24aef87ce033e2965ec44 2024-11-20T22:24:49,565 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/b7b5b2610bc246ce938cd93f67a7be9d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/b7b5b2610bc246ce938cd93f67a7be9d 2024-11-20T22:24:49,566 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/0519d94d37f649d5864287dac308d2d5 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/0519d94d37f649d5864287dac308d2d5 2024-11-20T22:24:49,568 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/99ca2cab4bf747c3843e548380b5ba5a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/99ca2cab4bf747c3843e548380b5ba5a 2024-11-20T22:24:49,569 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/731a635790464fc883c130b5222af593 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/731a635790464fc883c130b5222af593 2024-11-20T22:24:49,570 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/695dd35a7c424a49bdce9ce2c3fa679f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/695dd35a7c424a49bdce9ce2c3fa679f 2024-11-20T22:24:49,571 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/3435af535e21428e97820cf67ea2b6f0 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/3435af535e21428e97820cf67ea2b6f0 2024-11-20T22:24:49,572 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/4dde0fb747e24cad9ca19a3bc2b7a45d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/4dde0fb747e24cad9ca19a3bc2b7a45d 2024-11-20T22:24:49,573 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/1219d20d7b544331a7dd42bb3c07cc6e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/1219d20d7b544331a7dd42bb3c07cc6e 2024-11-20T22:24:49,573 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/13f4f5ec647a431083f12cbdf3a05a0b to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/13f4f5ec647a431083f12cbdf3a05a0b 2024-11-20T22:24:49,575 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/4c47f3c3b67942269507a3043a4ff836 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/4c47f3c3b67942269507a3043a4ff836 2024-11-20T22:24:49,576 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/76f613be932848f1890cb33d22c0f3bc to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/76f613be932848f1890cb33d22c0f3bc 2024-11-20T22:24:49,586 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/1cb157449b6d4a168061e32ed2e90392 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/1cb157449b6d4a168061e32ed2e90392 2024-11-20T22:24:49,587 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/867b9249a8834fe0b0b0aca2367de70f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/867b9249a8834fe0b0b0aca2367de70f 2024-11-20T22:24:49,595 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/828938e435f54fb5892f58052e5cf494 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/828938e435f54fb5892f58052e5cf494 2024-11-20T22:24:49,598 DEBUG [StoreCloser-TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/1bbe73bf25ef4adc81f50e70e08fc37e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/1bbe73bf25ef4adc81f50e70e08fc37e 2024-11-20T22:24:49,602 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/recovered.edits/423.seqid, newMaxSeqId=423, maxSeqId=1 2024-11-20T22:24:49,603 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2. 
2024-11-20T22:24:49,603 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1635): Region close journal for 1ae43dfbfefd3b112decface0ed50cc2: 2024-11-20T22:24:49,604 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(170): Closed 1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:49,605 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=91 updating hbase:meta row=1ae43dfbfefd3b112decface0ed50cc2, regionState=CLOSED 2024-11-20T22:24:49,612 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T22:24:49,615 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-20T22:24:49,616 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; CloseRegionProcedure 1ae43dfbfefd3b112decface0ed50cc2, server=6365a1e51efd,44631,1732141399950 in 958 msec 2024-11-20T22:24:49,618 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=91, resume processing ppid=90 2024-11-20T22:24:49,618 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=90, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=1ae43dfbfefd3b112decface0ed50cc2, UNASSIGN in 963 msec 2024-11-20T22:24:49,620 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-20T22:24:49,621 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 969 msec 2024-11-20T22:24:49,623 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141489622"}]},"ts":"1732141489622"} 2024-11-20T22:24:49,624 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T22:24:49,676 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T22:24:49,678 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.0470 sec 2024-11-20T22:24:49,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T22:24:49,736 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-11-20T22:24:49,736 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T22:24:49,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:49,738 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:49,738 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting 
regions from filesystem for pid=93, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:49,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-20T22:24:49,742 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:49,747 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A, FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B, FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C, FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/recovered.edits] 2024-11-20T22:24:49,781 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/6c8a77597203453eb27e349fe23aaeb4 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/6c8a77597203453eb27e349fe23aaeb4 2024-11-20T22:24:49,783 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/8d0ac7736b344d45b36166fdc4d61b6c to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/8d0ac7736b344d45b36166fdc4d61b6c 2024-11-20T22:24:49,785 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/bab27ed1532c4ccf9fcac3b5057bdda7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/A/bab27ed1532c4ccf9fcac3b5057bdda7 2024-11-20T22:24:49,787 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/7be1259be7c7400b9cf13de8a1a91e6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/7be1259be7c7400b9cf13de8a1a91e6a 2024-11-20T22:24:49,789 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/dd6b670b1699448abf38be2bce9c8460 to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/dd6b670b1699448abf38be2bce9c8460 2024-11-20T22:24:49,791 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/fb41ba5401424c86abeab1edb561e06a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/B/fb41ba5401424c86abeab1edb561e06a 2024-11-20T22:24:49,818 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/53d11a3110d3494fa223410d2815b8f8 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/53d11a3110d3494fa223410d2815b8f8 2024-11-20T22:24:49,835 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/801ef31ae56e43dea29b8ffb7b2d3a85 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/801ef31ae56e43dea29b8ffb7b2d3a85 2024-11-20T22:24:49,836 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/ac4bea7e8906448eaebf4f3951d95b10 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/C/ac4bea7e8906448eaebf4f3951d95b10 2024-11-20T22:24:49,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-20T22:24:49,852 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/recovered.edits/423.seqid to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2/recovered.edits/423.seqid 2024-11-20T22:24:49,852 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/1ae43dfbfefd3b112decface0ed50cc2 2024-11-20T22:24:49,852 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T22:24:49,870 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=93, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:49,887 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T22:24:49,892 DEBUG [PEWorker-4 {}] 
procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T22:24:49,896 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=93, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:49,896 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T22:24:49,896 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732141489896"}]},"ts":"9223372036854775807"} 2024-11-20T22:24:49,903 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T22:24:49,903 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 1ae43dfbfefd3b112decface0ed50cc2, NAME => 'TestAcidGuarantees,,1732141465885.1ae43dfbfefd3b112decface0ed50cc2.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T22:24:49,903 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T22:24:49,905 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732141489903"}]},"ts":"9223372036854775807"} 2024-11-20T22:24:49,911 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T22:24:49,926 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=93, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:49,927 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 190 msec 2024-11-20T22:24:50,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-20T22:24:50,051 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-11-20T22:24:50,074 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=239 (was 237) - Thread LEAK? -, OpenFileDescriptor=455 (was 451) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1069 (was 1058) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1948 (was 2353) 2024-11-20T22:24:50,093 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=239, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=1069, ProcessCount=11, AvailableMemoryMB=1948 2024-11-20T22:24:50,095 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
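
[Editor's note, not test output] The DISABLE (procId 89) and DELETE (procId 93) operations completed above are what the test harness issues to drop TestAcidGuarantees between test cases; the next entry begins a fresh CREATE for testMobScanAtomicity. A minimal client-side sketch of the same drop sequence, assuming a pre-built Connection (the class and method names here are illustrative, not taken from the test code):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public final class DropTableSketch {
      // Disable, then delete, the test table; each call blocks until the matching
      // master procedure (DisableTableProcedure / DeleteTableProcedure) finishes.
      static void dropTable(Connection conn) throws java.io.IOException {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Admin admin = conn.getAdmin()) {
          if (admin.tableExists(tn)) {
            if (admin.isTableEnabled(tn)) {
              admin.disableTable(tn);   // corresponds to "Operation: DISABLE ... completed" above
            }
            admin.deleteTable(tn);      // corresponds to "Operation: DELETE ... completed" above
          }
        }
      }
    }
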
2024-11-20T22:24:50,096 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T22:24:50,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:50,098 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T22:24:50,098 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:50,099 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T22:24:50,100 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 94 2024-11-20T22:24:50,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-20T22:24:50,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742148_1324 (size=960) 2024-11-20T22:24:50,157 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a 2024-11-20T22:24:50,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742149_1325 (size=53) 2024-11-20T22:24:50,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-20T22:24:50,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-20T22:24:50,598 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:24:50,598 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing bcb32e1c322c877c5cddb0d2b8bcab6a, disabling compactions & flushes 2024-11-20T22:24:50,599 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:50,599 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:50,599 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. after waiting 0 ms 2024-11-20T22:24:50,599 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:50,599 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:50,599 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:24:50,600 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T22:24:50,600 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732141490600"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732141490600"}]},"ts":"1732141490600"} 2024-11-20T22:24:50,601 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
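
[Editor's note, not test output] The CREATE logged at 22:24:50,096 builds the three-family table (A, B, C, one version each) with the BASIC compacting memstore enabled through the 'hbase.hregion.compacting.memstore.type' table attribute shown in the descriptor. A hedged equivalent through the Java Admin API (the test's own helper code may build the descriptor differently):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class CreateTestTableSketch {
      // Builds TestAcidGuarantees with families A, B, C and the BASIC
      // compacting-memstore attribute seen in the logged descriptor.
      static void create(Connection conn) throws java.io.IOException {
        TableDescriptorBuilder tdb = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
        for (String family : new String[] {"A", "B", "C"}) {
          tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(family.getBytes())
              .setMaxVersions(1)
              .build());
        }
        try (Admin admin = conn.getAdmin()) {
          admin.createTable(tdb.build());
        }
      }
    }

The TableDescriptorChecker warning above about a 131072-byte flush size would correspond to the test also setting something like tdb.setMemStoreFlushSize(131072) on this builder, which falls below the checker's sanity threshold and so only produces the WARN rather than failing the request.
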
2024-11-20T22:24:50,601 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T22:24:50,601 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141490601"}]},"ts":"1732141490601"} 2024-11-20T22:24:50,603 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T22:24:50,618 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bcb32e1c322c877c5cddb0d2b8bcab6a, ASSIGN}] 2024-11-20T22:24:50,619 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bcb32e1c322c877c5cddb0d2b8bcab6a, ASSIGN 2024-11-20T22:24:50,619 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=bcb32e1c322c877c5cddb0d2b8bcab6a, ASSIGN; state=OFFLINE, location=6365a1e51efd,44631,1732141399950; forceNewPlan=false, retain=false 2024-11-20T22:24:50,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-20T22:24:50,770 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=bcb32e1c322c877c5cddb0d2b8bcab6a, regionState=OPENING, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:50,771 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; OpenRegionProcedure bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950}] 2024-11-20T22:24:50,922 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:50,925 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:24:50,925 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7285): Opening region: {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:24:50,925 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:50,925 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:24:50,925 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7327): checking encryption for bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:50,925 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7330): checking classloading for bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:50,926 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:50,928 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:24:50,928 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bcb32e1c322c877c5cddb0d2b8bcab6a columnFamilyName A 2024-11-20T22:24:50,928 DEBUG [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:50,929 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] regionserver.HStore(327): Store=bcb32e1c322c877c5cddb0d2b8bcab6a/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:24:50,929 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:50,930 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:24:50,930 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bcb32e1c322c877c5cddb0d2b8bcab6a columnFamilyName B 2024-11-20T22:24:50,930 DEBUG [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:50,930 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] regionserver.HStore(327): Store=bcb32e1c322c877c5cddb0d2b8bcab6a/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:24:50,930 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:50,931 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:24:50,931 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bcb32e1c322c877c5cddb0d2b8bcab6a columnFamilyName C 2024-11-20T22:24:50,931 DEBUG [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:50,931 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] regionserver.HStore(327): Store=bcb32e1c322c877c5cddb0d2b8bcab6a/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:24:50,932 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:50,932 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:50,932 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:50,934 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T22:24:50,935 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1085): writing seq id for bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:50,937 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T22:24:50,938 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1102): Opened bcb32e1c322c877c5cddb0d2b8bcab6a; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68798226, jitterRate=0.025173455476760864}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T22:24:50,939 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1001): Region open journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:24:50,940 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., pid=96, masterSystemTime=1732141490922 2024-11-20T22:24:50,941 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:50,941 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
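
[Editor's note, not test output] The store-open lines above report each family getting a CompactingMemStore with compactor=BASIC, which follows from the table-level attribute in the descriptor. For reference, a per-family way to request the same in-memory compaction policy exists in the client API; this is a hedged sketch and not what this test's descriptor shows:

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;

    public final class InMemoryCompactionSketch {
      // Per-family equivalent of the table-level BASIC compacting-memstore attribute.
      static ColumnFamilyDescriptor basicCompactingFamily(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(name.getBytes())
            .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
            .build();
      }
    }
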
2024-11-20T22:24:50,941 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=bcb32e1c322c877c5cddb0d2b8bcab6a, regionState=OPEN, openSeqNum=2, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:50,943 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-11-20T22:24:50,943 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; OpenRegionProcedure bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 in 171 msec 2024-11-20T22:24:50,944 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-11-20T22:24:50,944 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=bcb32e1c322c877c5cddb0d2b8bcab6a, ASSIGN in 325 msec 2024-11-20T22:24:50,944 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T22:24:50,945 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141490944"}]},"ts":"1732141490944"} 2024-11-20T22:24:50,945 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T22:24:50,951 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T22:24:50,953 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 855 msec 2024-11-20T22:24:51,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-20T22:24:51,206 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 94 completed 2024-11-20T22:24:51,208 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4dacfd49 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5271608e 2024-11-20T22:24:51,239 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f9fed4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:51,256 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:51,259 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48050, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:51,273 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T22:24:51,277 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36882, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T22:24:51,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T22:24:51,284 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T22:24:51,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:51,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742150_1326 (size=996) 2024-11-20T22:24:51,747 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-20T22:24:51,747 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-20T22:24:51,750 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T22:24:51,755 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bcb32e1c322c877c5cddb0d2b8bcab6a, REOPEN/MOVE}] 2024-11-20T22:24:51,763 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bcb32e1c322c877c5cddb0d2b8bcab6a, REOPEN/MOVE 2024-11-20T22:24:51,764 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=bcb32e1c322c877c5cddb0d2b8bcab6a, regionState=CLOSING, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:51,765 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T22:24:51,765 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE; CloseRegionProcedure bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950}] 2024-11-20T22:24:51,919 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:51,919 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(124): Close bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:51,919 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T22:24:51,919 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1681): Closing bcb32e1c322c877c5cddb0d2b8bcab6a, disabling compactions & flushes 2024-11-20T22:24:51,919 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:51,919 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:51,920 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. after waiting 0 ms 2024-11-20T22:24:51,920 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
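
The ModifyTableProcedure stored above as pid=97 carries the one schema change in this run: column family A of TestAcidGuarantees is switched to MOB storage with MOB_THRESHOLD => '4', after which the master rewrites the .tableinfo file and reopens the region. A minimal sketch of issuing that change through the HBase 2.x Admin API follows; the class name and the default connection setup are illustrative, and only the table, family, and MOB settings are taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobOnFamilyA {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          TableDescriptor current = admin.getDescriptor(table);
          // Rebuild family 'A' with MOB enabled and a 4-byte threshold,
          // mirroring IS_MOB => 'true', MOB_THRESHOLD => '4' in the logged descriptor.
          ColumnFamilyDescriptor familyA = current.getColumnFamily(Bytes.toBytes("A"));
          ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(familyA)
              .setMobEnabled(true)
              .setMobThreshold(4L)
              .build();
          TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
              .modifyColumnFamily(mobA)
              .build();
          // modifyTable writes a new .tableinfo file and schedules a
          // ReopenTableRegionsProcedure, matching pid=97/98 above.
          admin.modifyTable(updated);
        }
      }
    }

A roughly equivalent shell form would be: alter 'TestAcidGuarantees', {NAME => 'A', IS_MOB => true, MOB_THRESHOLD => 4}.
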
2024-11-20T22:24:51,923 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T22:24:51,923 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:51,923 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1635): Region close journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:24:51,923 WARN [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegionServer(3786): Not adding moved region record: bcb32e1c322c877c5cddb0d2b8bcab6a to self. 2024-11-20T22:24:51,924 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(170): Closed bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:51,925 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=bcb32e1c322c877c5cddb0d2b8bcab6a, regionState=CLOSED 2024-11-20T22:24:51,926 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-11-20T22:24:51,926 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; CloseRegionProcedure bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 in 160 msec 2024-11-20T22:24:51,926 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=bcb32e1c322c877c5cddb0d2b8bcab6a, REOPEN/MOVE; state=CLOSED, location=6365a1e51efd,44631,1732141399950; forceNewPlan=false, retain=true 2024-11-20T22:24:52,077 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=bcb32e1c322c877c5cddb0d2b8bcab6a, regionState=OPENING, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:52,078 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=99, state=RUNNABLE; OpenRegionProcedure bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950}] 2024-11-20T22:24:52,229 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:52,232 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
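
With the close finished (a 4.seqid marker written under recovered.edits and the region marked CLOSED in hbase:meta), the assignment restarts with retain=true, so the region is expected to reopen on the same server (6365a1e51efd,44631,1732141399950). A small client-side check of where the table's regions ended up could look like the sketch below; it uses the standard RegionLocator API, while the class name and printed format are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ShowRegionLocations {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("TestAcidGuarantees"))) {
          // Lists every region of the table with the location recorded in
          // hbase:meta, i.e. the row RegionStateStore updates during assignment.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName()
                + " on " + loc.getServerName());
          }
        }
      }
    }
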
2024-11-20T22:24:52,232 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7285): Opening region: {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:24:52,233 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:52,233 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:24:52,233 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7327): checking encryption for bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:52,233 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7330): checking classloading for bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:52,235 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:52,236 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:24:52,236 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bcb32e1c322c877c5cddb0d2b8bcab6a columnFamilyName A 2024-11-20T22:24:52,237 DEBUG [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:52,237 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] regionserver.HStore(327): Store=bcb32e1c322c877c5cddb0d2b8bcab6a/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:24:52,238 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:52,239 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:24:52,239 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bcb32e1c322c877c5cddb0d2b8bcab6a columnFamilyName B 2024-11-20T22:24:52,239 DEBUG [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:52,239 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] regionserver.HStore(327): Store=bcb32e1c322c877c5cddb0d2b8bcab6a/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:24:52,239 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:52,240 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:24:52,240 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bcb32e1c322c877c5cddb0d2b8bcab6a columnFamilyName C 2024-11-20T22:24:52,240 DEBUG [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:52,241 INFO [StoreOpener-bcb32e1c322c877c5cddb0d2b8bcab6a-1 {}] regionserver.HStore(327): Store=bcb32e1c322c877c5cddb0d2b8bcab6a/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:24:52,241 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:52,242 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:52,243 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:52,244 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T22:24:52,248 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1085): writing seq id for bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:52,250 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1102): Opened bcb32e1c322c877c5cddb0d2b8bcab6a; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67253900, jitterRate=0.0021612048149108887}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T22:24:52,251 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1001): Region open journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:24:52,252 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., pid=101, masterSystemTime=1732141492229 2024-11-20T22:24:52,254 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:52,255 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
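
The region reopens with next sequenceid=5 and three CompactingMemStore-backed stores. Because the descriptor keeps the small MEMSTORE_FLUSHSIZE of 131072 flagged earlier, the entries that follow show the memstore filling up almost immediately: a flush of all 3 column families is requested, incoming puts are rejected with RegionTooBusyException ("Over memstore limit=512.0 K"), and the test also asks the master for a table flush (pid=102). A sketch of that explicit flush request through the Admin API is shown below; the class name is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // A client-side table flush reaches the master as a
          // FlushTableProcedure (pid=102 below); the region server skips the
          // per-region callable with "NOT flushing ... as already flushing"
          // when the MemStoreFlusher is already draining the region.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
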
2024-11-20T22:24:52,255 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=bcb32e1c322c877c5cddb0d2b8bcab6a, regionState=OPEN, openSeqNum=5, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:52,256 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=99 2024-11-20T22:24:52,256 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=99, state=SUCCESS; OpenRegionProcedure bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 in 177 msec 2024-11-20T22:24:52,257 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-11-20T22:24:52,257 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=bcb32e1c322c877c5cddb0d2b8bcab6a, REOPEN/MOVE in 501 msec 2024-11-20T22:24:52,259 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-11-20T22:24:52,259 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 508 msec 2024-11-20T22:24:52,262 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 975 msec 2024-11-20T22:24:52,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-20T22:24:52,264 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x033feebb to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a139b42 2024-11-20T22:24:52,375 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1157d18a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:52,376 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c40db2e to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1acf826f 2024-11-20T22:24:52,393 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@353bcb3d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:52,394 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7a86cb71 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1cbce2b4 2024-11-20T22:24:52,410 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77b5b03d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:52,411 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3401188a to 
127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4fd3f5fc 2024-11-20T22:24:52,418 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15bd9063, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:52,419 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x55650656 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c97513 2024-11-20T22:24:52,426 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c0ec341, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:52,427 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x42af2962 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4831febd 2024-11-20T22:24:52,435 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b660061, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:52,435 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5910b8c7 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1e93614e 2024-11-20T22:24:52,443 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45ad0ff5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:52,444 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x003f9a05 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@523025d 2024-11-20T22:24:52,451 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28dc77ab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:52,452 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x26b6d860 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b9a1701 2024-11-20T22:24:52,466 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70304ef6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:52,467 DEBUG [Time-limited test {}] 
zookeeper.ReadOnlyZKClient(149): Connect 0x16722a1f to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7d3b05cf 2024-11-20T22:24:52,477 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f8ea360, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:52,507 DEBUG [hconnection-0x3d0dc0af-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:52,508 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48058, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:52,515 DEBUG [hconnection-0x41ea280-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:52,515 DEBUG [hconnection-0x72309aef-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:52,516 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48078, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:52,516 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48064, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:52,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:52,519 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:24:52,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:24:52,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:52,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:24:52,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:52,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 2024-11-20T22:24:52,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:52,523 DEBUG [hconnection-0x434dbc89-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:52,524 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48086, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:52,527 DEBUG [hconnection-0x42df022f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:52,528 INFO [RS-EventLoopGroup-3-2 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48102, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:52,535 DEBUG [hconnection-0x665d31-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:52,536 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48114, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:52,537 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141552536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:52,537 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141552537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:52,539 DEBUG [hconnection-0x724f4e15-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:52,540 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48128, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:52,541 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141552541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:52,548 DEBUG [hconnection-0x47012985-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:52,549 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48136, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:52,567 DEBUG [hconnection-0x6117c1e8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:52,568 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48138, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:52,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1732141552569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:52,575 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:52,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees 2024-11-20T22:24:52,576 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:52,577 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:52,577 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:52,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T22:24:52,587 DEBUG [hconnection-0x169ace45-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:52,588 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48142, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:52,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48142 deadline: 1732141552589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:52,607 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a0b0194b35aa49ef882485ad260c61a7_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141492517/Put/seqid=0 2024-11-20T22:24:52,640 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141552638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:52,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141552641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:52,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141552644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:52,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742151_1327 (size=12154) 2024-11-20T22:24:52,671 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1732141552671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:52,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T22:24:52,693 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48142 deadline: 1732141552690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:52,728 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:52,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T22:24:52,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:52,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:24:52,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:52,729 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:52,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:52,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:52,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141552842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:52,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141552843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:52,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141552847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:52,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1732141552873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:52,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T22:24:52,880 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:52,881 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T22:24:52,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:52,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:24:52,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:52,882 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:52,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:52,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:52,894 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48142 deadline: 1732141552894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:53,039 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:53,039 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T22:24:53,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:53,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:24:53,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:24:53,039 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:53,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:53,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:53,053 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:53,058 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a0b0194b35aa49ef882485ad260c61a7_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a0b0194b35aa49ef882485ad260c61a7_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:53,059 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/7e78901d475845ffa5a108b6e418c468, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:24:53,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/7e78901d475845ffa5a108b6e418c468 is 175, key is test_row_0/A:col10/1732141492517/Put/seqid=0 2024-11-20T22:24:53,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742152_1328 (size=30955) 2024-11-20T22:24:53,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:53,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141553146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:53,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:53,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141553148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:53,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:53,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141553148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:53,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T22:24:53,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:53,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1732141553175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:53,192 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:53,193 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T22:24:53,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:53,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:24:53,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:53,194 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:53,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:53,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:53,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:53,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48142 deadline: 1732141553197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:53,346 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:53,346 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T22:24:53,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:53,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
as already flushing 2024-11-20T22:24:53,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:53,346 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:53,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:53,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:53,512 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:53,513 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T22:24:53,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:53,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:24:53,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:53,513 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:53,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:53,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:53,516 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/7e78901d475845ffa5a108b6e418c468 2024-11-20T22:24:53,568 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/768bae251c944c008a10e53c40b164c5 is 50, key is test_row_0/B:col10/1732141492517/Put/seqid=0 2024-11-20T22:24:53,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742153_1329 (size=12001) 2024-11-20T22:24:53,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:53,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141553651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:53,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:53,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141553652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:53,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:53,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141553655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:53,666 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:53,666 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T22:24:53,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:53,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:24:53,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:53,667 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:53,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:53,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:53,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T22:24:53,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:53,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1732141553690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:53,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:53,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48142 deadline: 1732141553719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:53,818 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:53,819 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T22:24:53,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:53,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:24:53,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:53,819 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:53,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:53,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:53,971 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:53,972 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T22:24:53,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:53,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:24:53,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:53,972 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:53,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:53,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:54,034 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/768bae251c944c008a10e53c40b164c5 2024-11-20T22:24:54,081 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/59d9d31b0aa2408b8d5c9466d7eac3ed is 50, key is test_row_0/C:col10/1732141492517/Put/seqid=0 2024-11-20T22:24:54,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742154_1330 (size=12001) 2024-11-20T22:24:54,112 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/59d9d31b0aa2408b8d5c9466d7eac3ed 2024-11-20T22:24:54,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/7e78901d475845ffa5a108b6e418c468 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/7e78901d475845ffa5a108b6e418c468 2024-11-20T22:24:54,122 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/7e78901d475845ffa5a108b6e418c468, entries=150, sequenceid=15, filesize=30.2 K 2024-11-20T22:24:54,123 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:54,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/768bae251c944c008a10e53c40b164c5 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/768bae251c944c008a10e53c40b164c5 2024-11-20T22:24:54,124 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T22:24:54,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:54,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
as already flushing 2024-11-20T22:24:54,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:54,124 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:54,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:54,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:54,128 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/768bae251c944c008a10e53c40b164c5, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T22:24:54,129 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/59d9d31b0aa2408b8d5c9466d7eac3ed as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/59d9d31b0aa2408b8d5c9466d7eac3ed 2024-11-20T22:24:54,134 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/59d9d31b0aa2408b8d5c9466d7eac3ed, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T22:24:54,135 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for bcb32e1c322c877c5cddb0d2b8bcab6a in 1616ms, sequenceid=15, compaction requested=false 2024-11-20T22:24:54,135 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T22:24:54,136 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:24:54,277 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 
6365a1e51efd,44631,1732141399950 2024-11-20T22:24:54,277 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T22:24:54,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:54,277 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:24:54,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:24:54,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:54,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:24:54,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:54,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 2024-11-20T22:24:54,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:54,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201d07a12afe0c4bd1a777d304259ca9fe_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141492535/Put/seqid=0 2024-11-20T22:24:54,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742155_1331 (size=12154) 2024-11-20T22:24:54,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:54,341 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201d07a12afe0c4bd1a777d304259ca9fe_bcb32e1c322c877c5cddb0d2b8bcab6a to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201d07a12afe0c4bd1a777d304259ca9fe_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:54,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/de43a769275e48c29cd4e748beb61ff0, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:24:54,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/de43a769275e48c29cd4e748beb61ff0 is 175, key is test_row_0/A:col10/1732141492535/Put/seqid=0 2024-11-20T22:24:54,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742156_1332 (size=30955) 2024-11-20T22:24:54,393 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/de43a769275e48c29cd4e748beb61ff0 2024-11-20T22:24:54,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/70a0e5af319e40c09ae042e7eef1c106 is 50, key is test_row_0/B:col10/1732141492535/Put/seqid=0 2024-11-20T22:24:54,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742157_1333 (size=12001) 2024-11-20T22:24:54,459 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/70a0e5af319e40c09ae042e7eef1c106 2024-11-20T22:24:54,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/99a24d57bd3e43509e27eb5590efd3ec is 50, key is test_row_0/C:col10/1732141492535/Put/seqid=0 2024-11-20T22:24:54,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742158_1334 (size=12001) 2024-11-20T22:24:54,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:54,663 
DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:24:54,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T22:24:54,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:54,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141554684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:54,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:54,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141554684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:54,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:54,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141554687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:54,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:54,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1732141554698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:54,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:54,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48142 deadline: 1732141554730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:54,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:54,805 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:54,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141554795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:54,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141554796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:54,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:54,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141554796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:54,926 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/99a24d57bd3e43509e27eb5590efd3ec 2024-11-20T22:24:54,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/de43a769275e48c29cd4e748beb61ff0 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/de43a769275e48c29cd4e748beb61ff0 2024-11-20T22:24:54,940 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/de43a769275e48c29cd4e748beb61ff0, entries=150, sequenceid=40, filesize=30.2 K 2024-11-20T22:24:54,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/70a0e5af319e40c09ae042e7eef1c106 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/70a0e5af319e40c09ae042e7eef1c106 2024-11-20T22:24:54,951 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/70a0e5af319e40c09ae042e7eef1c106, entries=150, sequenceid=40, filesize=11.7 K 2024-11-20T22:24:54,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/99a24d57bd3e43509e27eb5590efd3ec as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/99a24d57bd3e43509e27eb5590efd3ec 2024-11-20T22:24:54,960 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/99a24d57bd3e43509e27eb5590efd3ec, entries=150, sequenceid=40, filesize=11.7 K 2024-11-20T22:24:54,961 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for bcb32e1c322c877c5cddb0d2b8bcab6a in 684ms, sequenceid=40, compaction requested=false 2024-11-20T22:24:54,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2538): Flush status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:24:54,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:54,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-20T22:24:54,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=103 2024-11-20T22:24:54,966 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-11-20T22:24:54,966 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3870 sec 2024-11-20T22:24:54,968 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees in 2.3910 sec 2024-11-20T22:24:55,011 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T22:24:55,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:24:55,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:55,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:24:55,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:55,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 2024-11-20T22:24:55,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-20T22:24:55,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:55,040 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120482073ac44aa47beaac7ed641be24201_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141495008/Put/seqid=0 2024-11-20T22:24:55,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742159_1335 (size=14594) 2024-11-20T22:24:55,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:55,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141555105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:55,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:55,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141555107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:55,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:55,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141555114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:55,159 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T22:24:55,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:55,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141555215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:55,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:55,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141555216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:55,233 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:55,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141555228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:55,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:55,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141555431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:55,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:55,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141555432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:55,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:55,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141555436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:55,477 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:55,482 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120482073ac44aa47beaac7ed641be24201_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120482073ac44aa47beaac7ed641be24201_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:55,483 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/128fb923a88c4e0cb5c790a0b425ac14, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:24:55,484 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/128fb923a88c4e0cb5c790a0b425ac14 is 175, key is test_row_0/A:col10/1732141495008/Put/seqid=0 2024-11-20T22:24:55,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742160_1336 (size=39549) 2024-11-20T22:24:55,537 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=52, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/128fb923a88c4e0cb5c790a0b425ac14 2024-11-20T22:24:55,560 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/2f30e1fd8b194e128ca27d373a69c832 is 50, key is test_row_0/B:col10/1732141495008/Put/seqid=0 2024-11-20T22:24:55,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742161_1337 (size=12001) 2024-11-20T22:24:55,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:55,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141555740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:55,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:55,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141555743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:55,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:55,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141555743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:55,990 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/2f30e1fd8b194e128ca27d373a69c832 2024-11-20T22:24:56,013 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/17970aae36eb4b88bfbe25a679e6adcc is 50, key is test_row_0/C:col10/1732141495008/Put/seqid=0 2024-11-20T22:24:56,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742162_1338 (size=12001) 2024-11-20T22:24:56,051 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/17970aae36eb4b88bfbe25a679e6adcc 2024-11-20T22:24:56,065 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/128fb923a88c4e0cb5c790a0b425ac14 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/128fb923a88c4e0cb5c790a0b425ac14 2024-11-20T22:24:56,088 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/128fb923a88c4e0cb5c790a0b425ac14, entries=200, sequenceid=52, filesize=38.6 K 2024-11-20T22:24:56,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/2f30e1fd8b194e128ca27d373a69c832 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2f30e1fd8b194e128ca27d373a69c832 2024-11-20T22:24:56,107 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2f30e1fd8b194e128ca27d373a69c832, entries=150, sequenceid=52, filesize=11.7 K 2024-11-20T22:24:56,108 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/17970aae36eb4b88bfbe25a679e6adcc as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/17970aae36eb4b88bfbe25a679e6adcc 2024-11-20T22:24:56,115 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/17970aae36eb4b88bfbe25a679e6adcc, entries=150, sequenceid=52, filesize=11.7 K 2024-11-20T22:24:56,116 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for bcb32e1c322c877c5cddb0d2b8bcab6a in 1106ms, sequenceid=52, compaction requested=true 2024-11-20T22:24:56,116 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:24:56,116 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:56,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:56,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:56,117 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:56,117 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:56,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:56,117 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/A is initiating minor compaction (all files) 2024-11-20T22:24:56,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:56,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:56,117 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 
bcb32e1c322c877c5cddb0d2b8bcab6a/A in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:56,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:56,117 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/7e78901d475845ffa5a108b6e418c468, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/de43a769275e48c29cd4e748beb61ff0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/128fb923a88c4e0cb5c790a0b425ac14] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=99.1 K 2024-11-20T22:24:56,117 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:56,117 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/7e78901d475845ffa5a108b6e418c468, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/de43a769275e48c29cd4e748beb61ff0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/128fb923a88c4e0cb5c790a0b425ac14] 2024-11-20T22:24:56,118 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e78901d475845ffa5a108b6e418c468, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732141492513 2024-11-20T22:24:56,118 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:56,118 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/B is initiating minor compaction (all files) 2024-11-20T22:24:56,118 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/B in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:24:56,118 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/768bae251c944c008a10e53c40b164c5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/70a0e5af319e40c09ae042e7eef1c106, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2f30e1fd8b194e128ca27d373a69c832] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=35.2 K 2024-11-20T22:24:56,118 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting de43a769275e48c29cd4e748beb61ff0, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732141492534 2024-11-20T22:24:56,118 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 768bae251c944c008a10e53c40b164c5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732141492513 2024-11-20T22:24:56,119 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 128fb923a88c4e0cb5c790a0b425ac14, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732141494674 2024-11-20T22:24:56,119 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 70a0e5af319e40c09ae042e7eef1c106, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732141492534 2024-11-20T22:24:56,120 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f30e1fd8b194e128ca27d373a69c832, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732141494674 2024-11-20T22:24:56,140 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:24:56,159 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120b04ad813bd594fc8b8479247e82d7d71_bcb32e1c322c877c5cddb0d2b8bcab6a store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:24:56,160 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#B#compaction#286 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:56,161 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/42a50ab1cba242e0ac0b42927f58c279 is 50, key is test_row_0/B:col10/1732141495008/Put/seqid=0 2024-11-20T22:24:56,162 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120b04ad813bd594fc8b8479247e82d7d71_bcb32e1c322c877c5cddb0d2b8bcab6a, store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:24:56,162 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b04ad813bd594fc8b8479247e82d7d71_bcb32e1c322c877c5cddb0d2b8bcab6a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:24:56,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742163_1339 (size=4469) 2024-11-20T22:24:56,221 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#A#compaction#285 average throughput is 0.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:56,221 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/e18defaa0ee74b5c8c3bbbe65cab78d1 is 175, key is test_row_0/A:col10/1732141495008/Put/seqid=0 2024-11-20T22:24:56,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742164_1340 (size=12104) 2024-11-20T22:24:56,249 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/42a50ab1cba242e0ac0b42927f58c279 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/42a50ab1cba242e0ac0b42927f58c279 2024-11-20T22:24:56,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:56,253 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:24:56,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:24:56,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:56,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:24:56,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:56,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 2024-11-20T22:24:56,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:56,259 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/B of bcb32e1c322c877c5cddb0d2b8bcab6a into 42a50ab1cba242e0ac0b42927f58c279(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:56,260 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:24:56,260 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/B, priority=13, startTime=1732141496116; duration=0sec 2024-11-20T22:24:56,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742165_1341 (size=31058) 2024-11-20T22:24:56,260 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:56,260 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:B 2024-11-20T22:24:56,260 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:56,262 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:56,263 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/C is initiating minor compaction (all files) 2024-11-20T22:24:56,263 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/C in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:24:56,263 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/59d9d31b0aa2408b8d5c9466d7eac3ed, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/99a24d57bd3e43509e27eb5590efd3ec, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/17970aae36eb4b88bfbe25a679e6adcc] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=35.2 K 2024-11-20T22:24:56,264 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 59d9d31b0aa2408b8d5c9466d7eac3ed, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732141492513 2024-11-20T22:24:56,264 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 99a24d57bd3e43509e27eb5590efd3ec, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732141492534 2024-11-20T22:24:56,265 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 17970aae36eb4b88bfbe25a679e6adcc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732141494674 2024-11-20T22:24:56,268 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/e18defaa0ee74b5c8c3bbbe65cab78d1 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/e18defaa0ee74b5c8c3bbbe65cab78d1 2024-11-20T22:24:56,276 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/A of bcb32e1c322c877c5cddb0d2b8bcab6a into e18defaa0ee74b5c8c3bbbe65cab78d1(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:56,276 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:24:56,276 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/A, priority=13, startTime=1732141496116; duration=0sec 2024-11-20T22:24:56,277 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:56,277 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:A 2024-11-20T22:24:56,285 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#C#compaction#287 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:56,285 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/df1cced665dc4466b57e96294d2a1f33 is 50, key is test_row_0/C:col10/1732141495008/Put/seqid=0 2024-11-20T22:24:56,295 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120344b5bae559441818650bfae1e0801f6_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141495105/Put/seqid=0 2024-11-20T22:24:56,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141556299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:56,311 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141556301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:56,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141556301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:56,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742167_1343 (size=14594) 2024-11-20T22:24:56,330 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:56,341 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120344b5bae559441818650bfae1e0801f6_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120344b5bae559441818650bfae1e0801f6_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:56,343 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/add0aafb054f4bfe99738ae6d82728bc, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:24:56,344 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/add0aafb054f4bfe99738ae6d82728bc is 175, key is test_row_0/A:col10/1732141495105/Put/seqid=0 2024-11-20T22:24:56,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742166_1342 (size=12104) 2024-11-20T22:24:56,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742168_1344 (size=39549) 2024-11-20T22:24:56,417 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141556410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:56,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141556413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:56,426 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141556414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:56,631 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141556620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:56,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141556629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:56,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141556630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:56,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T22:24:56,685 INFO [Thread-1494 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed 2024-11-20T22:24:56,686 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:56,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-11-20T22:24:56,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T22:24:56,688 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:56,688 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:56,689 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:56,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1732141556712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:56,721 DEBUG [Thread-1484 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4178 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., hostname=6365a1e51efd,44631,1732141399950, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:56,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48142 deadline: 1732141556738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:56,744 DEBUG [Thread-1490 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4185 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., hostname=6365a1e51efd,44631,1732141399950, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:56,756 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/df1cced665dc4466b57e96294d2a1f33 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/df1cced665dc4466b57e96294d2a1f33 2024-11-20T22:24:56,776 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/C of bcb32e1c322c877c5cddb0d2b8bcab6a into df1cced665dc4466b57e96294d2a1f33(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:56,776 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:24:56,776 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/C, priority=13, startTime=1732141496117; duration=0sec 2024-11-20T22:24:56,776 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:56,776 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:C 2024-11-20T22:24:56,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T22:24:56,791 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/add0aafb054f4bfe99738ae6d82728bc 2024-11-20T22:24:56,803 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/a8972f4169e54892861cd4219829d07a is 50, key is test_row_0/B:col10/1732141495105/Put/seqid=0 2024-11-20T22:24:56,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742169_1345 (size=12001) 2024-11-20T22:24:56,831 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=77 (bloomFilter=true), 
to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/a8972f4169e54892861cd4219829d07a 2024-11-20T22:24:56,841 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:56,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T22:24:56,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:56,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:24:56,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:56,842 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:56,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:56,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:56,864 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/65ddd4c16078447c92dba4125f0b21b8 is 50, key is test_row_0/C:col10/1732141495105/Put/seqid=0 2024-11-20T22:24:56,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742170_1346 (size=12001) 2024-11-20T22:24:56,902 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/65ddd4c16078447c92dba4125f0b21b8 2024-11-20T22:24:56,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/add0aafb054f4bfe99738ae6d82728bc as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/add0aafb054f4bfe99738ae6d82728bc 2024-11-20T22:24:56,919 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/add0aafb054f4bfe99738ae6d82728bc, entries=200, sequenceid=77, filesize=38.6 K 2024-11-20T22:24:56,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/a8972f4169e54892861cd4219829d07a as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/a8972f4169e54892861cd4219829d07a 2024-11-20T22:24:56,923 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/a8972f4169e54892861cd4219829d07a, entries=150, sequenceid=77, filesize=11.7 K 2024-11-20T22:24:56,925 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/65ddd4c16078447c92dba4125f0b21b8 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/65ddd4c16078447c92dba4125f0b21b8 2024-11-20T22:24:56,938 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/65ddd4c16078447c92dba4125f0b21b8, entries=150, sequenceid=77, filesize=11.7 K 2024-11-20T22:24:56,939 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for bcb32e1c322c877c5cddb0d2b8bcab6a in 686ms, sequenceid=77, compaction 
requested=false 2024-11-20T22:24:56,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:24:56,942 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T22:24:56,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:24:56,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:56,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:24:56,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:56,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 2024-11-20T22:24:56,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:56,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:56,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209588e3f63dee414b8f394925c9a1edbd_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141496298/Put/seqid=0 2024-11-20T22:24:56,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T22:24:56,994 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:56,995 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T22:24:56,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:56,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:24:56,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:56,995 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:56,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:56,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:57,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742171_1347 (size=14594) 2024-11-20T22:24:57,028 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:57,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141557022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:57,031 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209588e3f63dee414b8f394925c9a1edbd_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209588e3f63dee414b8f394925c9a1edbd_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:57,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141557024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:57,033 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/416c6ccb474a4e96a656dea46785dff5, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:24:57,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141557024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:57,033 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/416c6ccb474a4e96a656dea46785dff5 is 175, key is test_row_0/A:col10/1732141496298/Put/seqid=0 2024-11-20T22:24:57,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742172_1348 (size=39549) 2024-11-20T22:24:57,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141557134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:57,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141557137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:57,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141557137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:57,147 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:57,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T22:24:57,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:57,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:24:57,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:57,148 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:57,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:57,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:57,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T22:24:57,300 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:57,301 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T22:24:57,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:57,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:24:57,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:57,301 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:57,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:57,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:57,350 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141557343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:57,350 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141557343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:57,350 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141557344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:57,454 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:57,454 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T22:24:57,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:24:57,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:24:57,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:57,454 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:57,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:57,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:57,468 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/416c6ccb474a4e96a656dea46785dff5 2024-11-20T22:24:57,489 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/7d381056fdc04479923bb0658e08d132 is 50, key is test_row_0/B:col10/1732141496298/Put/seqid=0 2024-11-20T22:24:57,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742173_1349 (size=12001) 2024-11-20T22:24:57,531 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/7d381056fdc04479923bb0658e08d132 2024-11-20T22:24:57,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/3159c3cb04b341cab432fd6222093349 is 50, key is test_row_0/C:col10/1732141496298/Put/seqid=0 2024-11-20T22:24:57,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742174_1350 (size=12001) 2024-11-20T22:24:57,603 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/3159c3cb04b341cab432fd6222093349 2024-11-20T22:24:57,606 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:57,607 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T22:24:57,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:57,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:24:57,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:24:57,607 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:57,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:57,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:57,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/416c6ccb474a4e96a656dea46785dff5 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/416c6ccb474a4e96a656dea46785dff5 2024-11-20T22:24:57,613 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/416c6ccb474a4e96a656dea46785dff5, entries=200, sequenceid=93, filesize=38.6 K 2024-11-20T22:24:57,614 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/7d381056fdc04479923bb0658e08d132 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/7d381056fdc04479923bb0658e08d132 2024-11-20T22:24:57,621 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/7d381056fdc04479923bb0658e08d132, entries=150, sequenceid=93, filesize=11.7 K 2024-11-20T22:24:57,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/3159c3cb04b341cab432fd6222093349 as 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/3159c3cb04b341cab432fd6222093349 2024-11-20T22:24:57,631 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/3159c3cb04b341cab432fd6222093349, entries=150, sequenceid=93, filesize=11.7 K 2024-11-20T22:24:57,632 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for bcb32e1c322c877c5cddb0d2b8bcab6a in 690ms, sequenceid=93, compaction requested=true 2024-11-20T22:24:57,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:24:57,632 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:57,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:57,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:57,632 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:57,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:57,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:57,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:57,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:57,636 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:57,636 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/B is initiating minor compaction (all files) 2024-11-20T22:24:57,636 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/B in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:24:57,636 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/42a50ab1cba242e0ac0b42927f58c279, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/a8972f4169e54892861cd4219829d07a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/7d381056fdc04479923bb0658e08d132] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=35.3 K 2024-11-20T22:24:57,637 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110156 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:57,637 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/A is initiating minor compaction (all files) 2024-11-20T22:24:57,637 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/A in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:57,637 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/e18defaa0ee74b5c8c3bbbe65cab78d1, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/add0aafb054f4bfe99738ae6d82728bc, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/416c6ccb474a4e96a656dea46785dff5] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=107.6 K 2024-11-20T22:24:57,637 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:57,637 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/e18defaa0ee74b5c8c3bbbe65cab78d1, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/add0aafb054f4bfe99738ae6d82728bc, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/416c6ccb474a4e96a656dea46785dff5] 2024-11-20T22:24:57,637 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting e18defaa0ee74b5c8c3bbbe65cab78d1, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732141494674 2024-11-20T22:24:57,637 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 42a50ab1cba242e0ac0b42927f58c279, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732141494674 2024-11-20T22:24:57,638 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting add0aafb054f4bfe99738ae6d82728bc, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732141495084 2024-11-20T22:24:57,638 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting a8972f4169e54892861cd4219829d07a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732141495084 2024-11-20T22:24:57,638 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 416c6ccb474a4e96a656dea46785dff5, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732141496297 2024-11-20T22:24:57,638 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d381056fdc04479923bb0658e08d132, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732141496298 2024-11-20T22:24:57,654 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:24:57,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:57,656 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T22:24:57,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:24:57,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:57,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:24:57,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:57,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 
2024-11-20T22:24:57,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:57,665 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#B#compaction#295 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:57,666 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/3a125b3cc4d3442fbe66e9e0b15d75db is 50, key is test_row_0/B:col10/1732141496298/Put/seqid=0 2024-11-20T22:24:57,671 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120a5265edefb3f4914b02a1e9dc205da9f_bcb32e1c322c877c5cddb0d2b8bcab6a store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:24:57,674 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120a5265edefb3f4914b02a1e9dc205da9f_bcb32e1c322c877c5cddb0d2b8bcab6a, store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:24:57,674 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a5265edefb3f4914b02a1e9dc205da9f_bcb32e1c322c877c5cddb0d2b8bcab6a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:24:57,695 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a74d0d4da24240578b31854b5b0affbe_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141497022/Put/seqid=0 2024-11-20T22:24:57,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141557685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:57,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141557686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:57,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141557698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:57,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742175_1351 (size=12207) 2024-11-20T22:24:57,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742176_1352 (size=4469) 2024-11-20T22:24:57,734 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#A#compaction#294 average throughput is 0.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:57,735 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/87f031d7ef60413780c215473e743e0e is 175, key is test_row_0/A:col10/1732141496298/Put/seqid=0 2024-11-20T22:24:57,741 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/3a125b3cc4d3442fbe66e9e0b15d75db as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/3a125b3cc4d3442fbe66e9e0b15d75db 2024-11-20T22:24:57,746 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/B of bcb32e1c322c877c5cddb0d2b8bcab6a into 3a125b3cc4d3442fbe66e9e0b15d75db(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
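Note on the RegionTooBusyException warnings above: HRegion.checkResources rejects incoming mutations while the region's memstore is above its blocking limit (512.0 K here) and the in-flight flush has not yet drained it, so writers are expected to back off and retry. The sketch below is a minimal, hypothetical client-side retry loop against the table and column family named in this log; it is not part of the test code, and in practice the HBase client also performs its own internal retries on this exception.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);          // may be rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs); // back off and retry once the flush has drained the memstore
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}
```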
2024-11-20T22:24:57,746 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:24:57,746 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/B, priority=13, startTime=1732141497632; duration=0sec 2024-11-20T22:24:57,746 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:57,746 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:B 2024-11-20T22:24:57,746 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:57,748 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:57,748 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/C is initiating minor compaction (all files) 2024-11-20T22:24:57,748 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/C in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:57,748 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/df1cced665dc4466b57e96294d2a1f33, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/65ddd4c16078447c92dba4125f0b21b8, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/3159c3cb04b341cab432fd6222093349] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=35.3 K 2024-11-20T22:24:57,749 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting df1cced665dc4466b57e96294d2a1f33, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732141494674 2024-11-20T22:24:57,749 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 65ddd4c16078447c92dba4125f0b21b8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732141495084 2024-11-20T22:24:57,749 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 3159c3cb04b341cab432fd6222093349, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732141496298 2024-11-20T22:24:57,760 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
6365a1e51efd,44631,1732141399950 2024-11-20T22:24:57,760 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T22:24:57,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:57,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:24:57,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:57,760 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:57,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:57,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
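Note on pid=105: the master keeps dispatching FlushRegionCallable to the region server, and each attempt fails with "Unable to complete flush" because HRegion(2496) reports the region is already flushing (the MemStoreFlusher.0 flush in progress above); the master simply re-sends the remote procedure until the region is free, which is why the same pid reappears below. For orientation only, a table flush of this kind can also be requested explicitly through the Admin API; the snippet is a generic, hypothetical example and does not claim to reproduce how this particular test triggers its flushes.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Ask the cluster to flush every region of the table; on the server side a request of this
      // kind surfaces as the RS_FLUSH_REGIONS / FlushRegionCallable procedure seen in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```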
2024-11-20T22:24:57,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742177_1353 (size=14594) 2024-11-20T22:24:57,766 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:57,772 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a74d0d4da24240578b31854b5b0affbe_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a74d0d4da24240578b31854b5b0affbe_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:57,773 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/f18244a860934112aba54c2026defde5, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:24:57,773 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/f18244a860934112aba54c2026defde5 is 175, key is test_row_0/A:col10/1732141497022/Put/seqid=0 2024-11-20T22:24:57,791 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#C#compaction#297 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:57,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T22:24:57,791 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/7ef22879dd674b81a67bdb5a3522e599 is 50, key is test_row_0/C:col10/1732141496298/Put/seqid=0 2024-11-20T22:24:57,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141557797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:57,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141557799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:57,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141557810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:57,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742179_1355 (size=39549) 2024-11-20T22:24:57,829 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/f18244a860934112aba54c2026defde5 2024-11-20T22:24:57,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742178_1354 (size=31161) 2024-11-20T22:24:57,850 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/87f031d7ef60413780c215473e743e0e as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/87f031d7ef60413780c215473e743e0e 2024-11-20T22:24:57,856 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/A of bcb32e1c322c877c5cddb0d2b8bcab6a into 87f031d7ef60413780c215473e743e0e(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:57,856 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:24:57,856 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/A, priority=13, startTime=1732141497632; duration=0sec 2024-11-20T22:24:57,856 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:57,856 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:A 2024-11-20T22:24:57,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742180_1356 (size=12207) 2024-11-20T22:24:57,862 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/7ef22879dd674b81a67bdb5a3522e599 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/7ef22879dd674b81a67bdb5a3522e599 2024-11-20T22:24:57,863 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/710c1b8a557d4432be0484e47bb26015 is 50, key is test_row_0/B:col10/1732141497022/Put/seqid=0 2024-11-20T22:24:57,867 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/C of bcb32e1c322c877c5cddb0d2b8bcab6a into 7ef22879dd674b81a67bdb5a3522e599(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
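Note on the 512.0 K figure in the "Over memstore limit" messages: the blocking threshold checked by HRegion.checkResources is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so this run is presumably configured with a far smaller flush size than the 128 MB production default. The values below are an assumption chosen only so that the product matches the 512 KB limit reported here (for example 128 KB times the default multiplier of 4); the excerpt does not show the test's actual configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
  // Hypothetical settings whose product (flush size * block multiplier) equals the
  // 512 KB blocking limit reported by RegionTooBusyException in this log.
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 KB per-region flush trigger
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4x the flush size
    return conf;
  }
}
```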
2024-11-20T22:24:57,867 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:24:57,867 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/C, priority=13, startTime=1732141497633; duration=0sec 2024-11-20T22:24:57,867 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:57,867 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:C 2024-11-20T22:24:57,914 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:57,915 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T22:24:57,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:57,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:24:57,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:57,915 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:57,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:57,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:57,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742181_1357 (size=12001) 2024-11-20T22:24:57,918 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/710c1b8a557d4432be0484e47bb26015 2024-11-20T22:24:57,936 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/f0d1879b27eb47eb9ab76741d9391275 is 50, key is test_row_0/C:col10/1732141497022/Put/seqid=0 2024-11-20T22:24:57,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742182_1358 (size=12001) 2024-11-20T22:24:58,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:58,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141558003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:58,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:58,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141558007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:58,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:58,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141558019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:58,066 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:58,066 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T22:24:58,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:58,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:24:58,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:58,067 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:58,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:58,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:58,218 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:58,219 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T22:24:58,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:58,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:24:58,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:58,220 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:58,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:58,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:58,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:58,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141558313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:58,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:58,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141558318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:58,342 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:58,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141558325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:58,382 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:58,387 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T22:24:58,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:58,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:24:58,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:58,387 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:58,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:58,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:58,391 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/f0d1879b27eb47eb9ab76741d9391275 2024-11-20T22:24:58,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/f18244a860934112aba54c2026defde5 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/f18244a860934112aba54c2026defde5 2024-11-20T22:24:58,401 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/f18244a860934112aba54c2026defde5, entries=200, sequenceid=117, filesize=38.6 K 2024-11-20T22:24:58,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/710c1b8a557d4432be0484e47bb26015 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/710c1b8a557d4432be0484e47bb26015 2024-11-20T22:24:58,410 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/710c1b8a557d4432be0484e47bb26015, entries=150, sequenceid=117, filesize=11.7 K 2024-11-20T22:24:58,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/f0d1879b27eb47eb9ab76741d9391275 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/f0d1879b27eb47eb9ab76741d9391275 2024-11-20T22:24:58,416 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/f0d1879b27eb47eb9ab76741d9391275, entries=150, sequenceid=117, filesize=11.7 K 2024-11-20T22:24:58,418 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for bcb32e1c322c877c5cddb0d2b8bcab6a in 762ms, sequenceid=117, compaction requested=false 2024-11-20T22:24:58,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:24:58,545 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:24:58,545 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T22:24:58,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:24:58,546 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T22:24:58,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:24:58,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:58,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:24:58,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:58,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 2024-11-20T22:24:58,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:58,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a6a7989f4f71485fae706504fc499227_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141497692/Put/seqid=0 2024-11-20T22:24:58,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742183_1359 (size=12154) 2024-11-20T22:24:58,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T22:24:58,838 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
as already flushing 2024-11-20T22:24:58,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:58,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:58,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141558929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:58,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:58,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141558931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:58,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:58,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141558943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:59,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:59,056 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a6a7989f4f71485fae706504fc499227_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a6a7989f4f71485fae706504fc499227_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:24:59,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/2f1ed40deefd4a56890bbe93aedc38e9, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:24:59,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/2f1ed40deefd4a56890bbe93aedc38e9 is 175, key is test_row_0/A:col10/1732141497692/Put/seqid=0 2024-11-20T22:24:59,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:59,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141559047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:59,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:59,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141559048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:59,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:59,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141559062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:59,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742184_1360 (size=30955) 2024-11-20T22:24:59,106 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=132, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/2f1ed40deefd4a56890bbe93aedc38e9 2024-11-20T22:24:59,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/8b274ac00a9c4e1ea959a176abe5a752 is 50, key is test_row_0/B:col10/1732141497692/Put/seqid=0 2024-11-20T22:24:59,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742185_1361 (size=12001) 2024-11-20T22:24:59,279 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:59,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141559264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:59,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:59,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141559272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:59,297 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:59,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141559284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:59,584 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/8b274ac00a9c4e1ea959a176abe5a752 2024-11-20T22:24:59,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:59,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141559581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:59,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:59,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141559595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:59,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/c23cfe3ba837424f905a72ddfb75f5ce is 50, key is test_row_0/C:col10/1732141497692/Put/seqid=0 2024-11-20T22:24:59,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:59,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141559600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:24:59,612 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T22:24:59,612 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-20T22:24:59,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742186_1362 (size=12001) 2024-11-20T22:24:59,625 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/c23cfe3ba837424f905a72ddfb75f5ce 2024-11-20T22:24:59,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/2f1ed40deefd4a56890bbe93aedc38e9 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/2f1ed40deefd4a56890bbe93aedc38e9 2024-11-20T22:24:59,683 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/2f1ed40deefd4a56890bbe93aedc38e9, entries=150, sequenceid=132, filesize=30.2 K 2024-11-20T22:24:59,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/8b274ac00a9c4e1ea959a176abe5a752 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/8b274ac00a9c4e1ea959a176abe5a752 2024-11-20T22:24:59,714 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/8b274ac00a9c4e1ea959a176abe5a752, entries=150, sequenceid=132, filesize=11.7 K 2024-11-20T22:24:59,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/c23cfe3ba837424f905a72ddfb75f5ce as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/c23cfe3ba837424f905a72ddfb75f5ce 2024-11-20T22:24:59,756 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/c23cfe3ba837424f905a72ddfb75f5ce, entries=150, sequenceid=132, filesize=11.7 K 2024-11-20T22:24:59,757 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for bcb32e1c322c877c5cddb0d2b8bcab6a in 1212ms, sequenceid=132, compaction requested=true 2024-11-20T22:24:59,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:24:59,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:24:59,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-11-20T22:24:59,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-11-20T22:24:59,760 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-11-20T22:24:59,761 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0700 sec 2024-11-20T22:24:59,762 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 3.0750 sec 2024-11-20T22:25:00,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:00,099 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:25:00,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:25:00,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:00,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:25:00,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:00,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 2024-11-20T22:25:00,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:00,135 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112020470c7c9719483687ff6a6c78f40d48_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141498906/Put/seqid=0 2024-11-20T22:25:00,163 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141560155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:00,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141560155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:00,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141560159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:00,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742187_1363 (size=14794) 2024-11-20T22:25:00,174 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:00,178 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112020470c7c9719483687ff6a6c78f40d48_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112020470c7c9719483687ff6a6c78f40d48_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:00,180 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/d8d12766592e4bc085b8c80f5c0f280c, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:00,181 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/d8d12766592e4bc085b8c80f5c0f280c is 175, key is test_row_0/A:col10/1732141498906/Put/seqid=0 2024-11-20T22:25:00,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742188_1364 (size=39749) 2024-11-20T22:25:00,218 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=49.2 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/d8d12766592e4bc085b8c80f5c0f280c 2024-11-20T22:25:00,240 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/2287a0e965e14de7bcfdd46b13e2c467 is 50, key is test_row_0/B:col10/1732141498906/Put/seqid=0 2024-11-20T22:25:00,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141560265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:00,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141560269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:00,277 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141560270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:00,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742189_1365 (size=12151) 2024-11-20T22:25:00,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141560478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:00,502 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141560486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:00,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141560487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:00,683 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/2287a0e965e14de7bcfdd46b13e2c467 2024-11-20T22:25:00,705 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/4d387d35ec8d4acaaf827a0eb097566a is 50, key is test_row_0/C:col10/1732141498906/Put/seqid=0 2024-11-20T22:25:00,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742190_1366 (size=12151) 2024-11-20T22:25:00,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1732141560761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:00,772 DEBUG [Thread-1484 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8229 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., hostname=6365a1e51efd,44631,1732141399950, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:25:00,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T22:25:00,795 INFO [Thread-1494 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-11-20T22:25:00,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48142 deadline: 1732141560785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:00,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141560790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:00,806 DEBUG [Thread-1490 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8246 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., hostname=6365a1e51efd,44631,1732141399950, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:25:00,807 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:00,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141560806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:00,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141560811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:00,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-11-20T22:25:00,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T22:25:00,815 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:00,816 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:00,816 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:00,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T22:25:00,968 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:00,968 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T22:25:00,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:00,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:00,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:25:00,969 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:00,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:00,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:01,125 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:01,126 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T22:25:01,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:01,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:01,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:01,126 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:01,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:01,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T22:25:01,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:01,151 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/4d387d35ec8d4acaaf827a0eb097566a 2024-11-20T22:25:01,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/d8d12766592e4bc085b8c80f5c0f280c as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/d8d12766592e4bc085b8c80f5c0f280c 2024-11-20T22:25:01,171 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/d8d12766592e4bc085b8c80f5c0f280c, entries=200, sequenceid=157, filesize=38.8 K 2024-11-20T22:25:01,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/2287a0e965e14de7bcfdd46b13e2c467 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2287a0e965e14de7bcfdd46b13e2c467 2024-11-20T22:25:01,196 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2287a0e965e14de7bcfdd46b13e2c467, entries=150, 
sequenceid=157, filesize=11.9 K 2024-11-20T22:25:01,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/4d387d35ec8d4acaaf827a0eb097566a as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/4d387d35ec8d4acaaf827a0eb097566a 2024-11-20T22:25:01,201 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/4d387d35ec8d4acaaf827a0eb097566a, entries=150, sequenceid=157, filesize=11.9 K 2024-11-20T22:25:01,202 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for bcb32e1c322c877c5cddb0d2b8bcab6a in 1103ms, sequenceid=157, compaction requested=true 2024-11-20T22:25:01,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:01,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:01,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:01,203 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:25:01,203 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:25:01,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:01,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:01,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:01,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:01,204 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48360 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:25:01,204 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/B is initiating minor compaction (all files) 2024-11-20T22:25:01,204 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/B in 
TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:01,204 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/3a125b3cc4d3442fbe66e9e0b15d75db, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/710c1b8a557d4432be0484e47bb26015, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/8b274ac00a9c4e1ea959a176abe5a752, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2287a0e965e14de7bcfdd46b13e2c467] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=47.2 K 2024-11-20T22:25:01,204 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141414 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:25:01,205 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/A is initiating minor compaction (all files) 2024-11-20T22:25:01,205 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/A in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:01,205 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/87f031d7ef60413780c215473e743e0e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/f18244a860934112aba54c2026defde5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/2f1ed40deefd4a56890bbe93aedc38e9, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/d8d12766592e4bc085b8c80f5c0f280c] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=138.1 K 2024-11-20T22:25:01,205 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:01,205 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/87f031d7ef60413780c215473e743e0e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/f18244a860934112aba54c2026defde5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/2f1ed40deefd4a56890bbe93aedc38e9, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/d8d12766592e4bc085b8c80f5c0f280c] 2024-11-20T22:25:01,205 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a125b3cc4d3442fbe66e9e0b15d75db, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732141496298 2024-11-20T22:25:01,205 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87f031d7ef60413780c215473e743e0e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732141496298 2024-11-20T22:25:01,206 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 710c1b8a557d4432be0484e47bb26015, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732141497022 2024-11-20T22:25:01,206 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting f18244a860934112aba54c2026defde5, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732141497020 2024-11-20T22:25:01,206 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b274ac00a9c4e1ea959a176abe5a752, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732141497677 2024-11-20T22:25:01,206 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f1ed40deefd4a56890bbe93aedc38e9, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732141497677 2024-11-20T22:25:01,207 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 2287a0e965e14de7bcfdd46b13e2c467, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732141498906 2024-11-20T22:25:01,207 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8d12766592e4bc085b8c80f5c0f280c, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732141498906 2024-11-20T22:25:01,237 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:01,240 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#B#compaction#307 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:01,241 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/e21c58822f9643888c4656b2c4b75240 is 50, key is test_row_0/B:col10/1732141498906/Put/seqid=0 2024-11-20T22:25:01,255 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120e223cd02d190498d8a21f69536236b28_bcb32e1c322c877c5cddb0d2b8bcab6a store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:01,258 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120e223cd02d190498d8a21f69536236b28_bcb32e1c322c877c5cddb0d2b8bcab6a, store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:01,258 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e223cd02d190498d8a21f69536236b28_bcb32e1c322c877c5cddb0d2b8bcab6a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:01,278 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:01,279 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T22:25:01,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:25:01,279 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T22:25:01,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:25:01,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:01,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:25:01,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:01,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 2024-11-20T22:25:01,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:01,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742191_1367 (size=12493) 2024-11-20T22:25:01,296 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/e21c58822f9643888c4656b2c4b75240 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/e21c58822f9643888c4656b2c4b75240 2024-11-20T22:25:01,303 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/B of bcb32e1c322c877c5cddb0d2b8bcab6a into e21c58822f9643888c4656b2c4b75240(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:01,303 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:01,304 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/B, priority=12, startTime=1732141501203; duration=0sec 2024-11-20T22:25:01,304 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:01,304 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:B 2024-11-20T22:25:01,304 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:25:01,305 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48360 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:25:01,305 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/C is initiating minor compaction (all files) 2024-11-20T22:25:01,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742192_1368 (size=4469) 2024-11-20T22:25:01,306 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/C in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:01,307 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#A#compaction#306 average throughput is 0.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:01,308 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/7ef22879dd674b81a67bdb5a3522e599, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/f0d1879b27eb47eb9ab76741d9391275, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/c23cfe3ba837424f905a72ddfb75f5ce, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/4d387d35ec8d4acaaf827a0eb097566a] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=47.2 K 2024-11-20T22:25:01,309 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/8cf62680bb864d11866610c66e05a173 is 175, key is test_row_0/A:col10/1732141498906/Put/seqid=0 2024-11-20T22:25:01,309 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ef22879dd674b81a67bdb5a3522e599, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732141496298 2024-11-20T22:25:01,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120eb161503a6ed45c2adcecba8617328b0_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141500153/Put/seqid=0 2024-11-20T22:25:01,312 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting f0d1879b27eb47eb9ab76741d9391275, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732141497022 2024-11-20T22:25:01,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:01,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
as already flushing 2024-11-20T22:25:01,313 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting c23cfe3ba837424f905a72ddfb75f5ce, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732141497677 2024-11-20T22:25:01,314 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d387d35ec8d4acaaf827a0eb097566a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732141498906 2024-11-20T22:25:01,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742193_1369 (size=31447) 2024-11-20T22:25:01,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742194_1370 (size=12304) 2024-11-20T22:25:01,346 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#C#compaction#309 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:01,346 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/dbaca0becd2e4a098b6ed6bb3cf017ce is 50, key is test_row_0/C:col10/1732141498906/Put/seqid=0 2024-11-20T22:25:01,350 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/8cf62680bb864d11866610c66e05a173 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/8cf62680bb864d11866610c66e05a173 2024-11-20T22:25:01,355 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/A of bcb32e1c322c877c5cddb0d2b8bcab6a into 8cf62680bb864d11866610c66e05a173(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:01,355 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:01,355 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/A, priority=12, startTime=1732141501202; duration=0sec 2024-11-20T22:25:01,355 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:01,355 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:A 2024-11-20T22:25:01,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742195_1371 (size=12493) 2024-11-20T22:25:01,404 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/dbaca0becd2e4a098b6ed6bb3cf017ce as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/dbaca0becd2e4a098b6ed6bb3cf017ce 2024-11-20T22:25:01,422 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/C of bcb32e1c322c877c5cddb0d2b8bcab6a into dbaca0becd2e4a098b6ed6bb3cf017ce(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:01,422 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:01,422 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/C, priority=12, startTime=1732141501203; duration=0sec 2024-11-20T22:25:01,422 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:01,422 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:C 2024-11-20T22:25:01,422 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:01,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141561407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:01,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:01,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141561408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:01,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:01,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141561422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:01,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T22:25:01,528 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:01,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141561524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:01,529 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:01,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141561526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:01,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:01,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141561538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:01,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:01,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141561732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:01,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:01,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141561732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:01,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:01,759 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120eb161503a6ed45c2adcecba8617328b0_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120eb161503a6ed45c2adcecba8617328b0_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:01,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/226f3eaebde345a1b0e4dcfb6b41e69c, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:01,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/226f3eaebde345a1b0e4dcfb6b41e69c is 175, key is test_row_0/A:col10/1732141500153/Put/seqid=0 2024-11-20T22:25:01,771 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:01,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141561751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:01,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742196_1372 (size=31105) 2024-11-20T22:25:01,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T22:25:02,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:02,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141562047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:02,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:02,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141562048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:02,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:02,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141562072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:02,214 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=169, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/226f3eaebde345a1b0e4dcfb6b41e69c 2024-11-20T22:25:02,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/6b6344107640488a81c9de6953085978 is 50, key is test_row_0/B:col10/1732141500153/Put/seqid=0 2024-11-20T22:25:02,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742197_1373 (size=12151) 2024-11-20T22:25:02,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:02,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141562557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:02,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:02,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141562565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:02,591 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:02,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141562584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:02,694 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/6b6344107640488a81c9de6953085978 2024-11-20T22:25:02,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/64a0e14971384c06a367f8354026516d is 50, key is test_row_0/C:col10/1732141500153/Put/seqid=0 2024-11-20T22:25:02,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742198_1374 (size=12151) 2024-11-20T22:25:02,759 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/64a0e14971384c06a367f8354026516d 2024-11-20T22:25:02,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/226f3eaebde345a1b0e4dcfb6b41e69c as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/226f3eaebde345a1b0e4dcfb6b41e69c 2024-11-20T22:25:02,770 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/226f3eaebde345a1b0e4dcfb6b41e69c, entries=150, sequenceid=169, filesize=30.4 K 2024-11-20T22:25:02,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/6b6344107640488a81c9de6953085978 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/6b6344107640488a81c9de6953085978 2024-11-20T22:25:02,776 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/6b6344107640488a81c9de6953085978, entries=150, sequenceid=169, filesize=11.9 K 2024-11-20T22:25:02,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/64a0e14971384c06a367f8354026516d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/64a0e14971384c06a367f8354026516d 2024-11-20T22:25:02,783 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/64a0e14971384c06a367f8354026516d, entries=150, sequenceid=169, filesize=11.9 K 2024-11-20T22:25:02,798 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for bcb32e1c322c877c5cddb0d2b8bcab6a in 1519ms, sequenceid=169, compaction requested=false 2024-11-20T22:25:02,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:02,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:25:02,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-11-20T22:25:02,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-11-20T22:25:02,809 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-11-20T22:25:02,809 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9870 sec 2024-11-20T22:25:02,810 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 2.0020 sec 2024-11-20T22:25:02,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T22:25:02,933 INFO [Thread-1494 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-11-20T22:25:02,943 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:02,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-11-20T22:25:02,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T22:25:02,944 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:02,945 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:02,945 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:03,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T22:25:03,096 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:03,097 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-20T22:25:03,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:25:03,097 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T22:25:03,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:25:03,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:03,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:25:03,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:03,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 2024-11-20T22:25:03,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:03,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120606801d675d14bb18fb490107c03d8f2_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141501406/Put/seqid=0 2024-11-20T22:25:03,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742199_1375 (size=12304) 2024-11-20T22:25:03,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:03,192 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120606801d675d14bb18fb490107c03d8f2_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120606801d675d14bb18fb490107c03d8f2_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:03,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/8f56c74831de4b6682b4862ba21728bb, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:03,194 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/8f56c74831de4b6682b4862ba21728bb is 175, key is test_row_0/A:col10/1732141501406/Put/seqid=0 2024-11-20T22:25:03,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742200_1376 (size=31105) 2024-11-20T22:25:03,214 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=196, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/8f56c74831de4b6682b4862ba21728bb 2024-11-20T22:25:03,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/ed8f2ae29c5d400fb7b337f3a5e21913 is 50, key is test_row_0/B:col10/1732141501406/Put/seqid=0 2024-11-20T22:25:03,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T22:25:03,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742201_1377 (size=12151) 2024-11-20T22:25:03,265 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/ed8f2ae29c5d400fb7b337f3a5e21913 2024-11-20T22:25:03,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/1dccf5d3cb264456a524b55988a4b765 is 50, key is test_row_0/C:col10/1732141501406/Put/seqid=0 2024-11-20T22:25:03,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742202_1378 (size=12151) 2024-11-20T22:25:03,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T22:25:03,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:03,578 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:03,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141563615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:03,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141563618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:03,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141563625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:03,727 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/1dccf5d3cb264456a524b55988a4b765 2024-11-20T22:25:03,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/8f56c74831de4b6682b4862ba21728bb as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/8f56c74831de4b6682b4862ba21728bb 2024-11-20T22:25:03,739 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/8f56c74831de4b6682b4862ba21728bb, entries=150, sequenceid=196, filesize=30.4 K 2024-11-20T22:25:03,740 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141563731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:03,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/ed8f2ae29c5d400fb7b337f3a5e21913 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/ed8f2ae29c5d400fb7b337f3a5e21913 2024-11-20T22:25:03,748 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/ed8f2ae29c5d400fb7b337f3a5e21913, entries=150, sequenceid=196, filesize=11.9 K 2024-11-20T22:25:03,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/1dccf5d3cb264456a524b55988a4b765 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/1dccf5d3cb264456a524b55988a4b765 2024-11-20T22:25:03,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141563742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:03,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141563745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:03,755 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/1dccf5d3cb264456a524b55988a4b765, entries=150, sequenceid=196, filesize=11.9 K 2024-11-20T22:25:03,757 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for bcb32e1c322c877c5cddb0d2b8bcab6a in 659ms, sequenceid=196, compaction requested=true 2024-11-20T22:25:03,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:03,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:25:03,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-11-20T22:25:03,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-11-20T22:25:03,763 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-11-20T22:25:03,763 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 817 msec 2024-11-20T22:25:03,764 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 820 msec 2024-11-20T22:25:03,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:03,947 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T22:25:03,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:25:03,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:03,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:25:03,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:03,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 2024-11-20T22:25:03,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:03,969 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e8445944f2e244f2b39e97de3ba4290e_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141503624/Put/seqid=0 2024-11-20T22:25:04,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742203_1379 (size=12304) 2024-11-20T22:25:04,008 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:04,014 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e8445944f2e244f2b39e97de3ba4290e_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e8445944f2e244f2b39e97de3ba4290e_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:04,015 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/968be4df223d4e0188c225f6b61adcbe, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:04,015 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/968be4df223d4e0188c225f6b61adcbe is 175, key is test_row_0/A:col10/1732141503624/Put/seqid=0 2024-11-20T22:25:04,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141564024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:04,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141564028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:04,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141564029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:04,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T22:25:04,047 INFO [Thread-1494 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-11-20T22:25:04,048 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:04,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-11-20T22:25:04,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T22:25:04,050 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:04,051 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:04,051 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:04,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742204_1380 (size=31105) 2024-11-20T22:25:04,062 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=209, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/968be4df223d4e0188c225f6b61adcbe 2024-11-20T22:25:04,094 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/2667fe0ca6344b9bb364879e2d4d07a0 is 50, key is 
test_row_0/B:col10/1732141503624/Put/seqid=0 2024-11-20T22:25:04,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742205_1381 (size=12151) 2024-11-20T22:25:04,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141564140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:04,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141564143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:04,155 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141564143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:04,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T22:25:04,203 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:04,203 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T22:25:04,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:04,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:04,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:04,203 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:04,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,357 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:04,357 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T22:25:04,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:04,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:04,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:04,358 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T22:25:04,369 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141564356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:04,369 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141564357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:04,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141564362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:04,516 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:04,517 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T22:25:04,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:04,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:04,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:04,517 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,541 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/2667fe0ca6344b9bb364879e2d4d07a0 2024-11-20T22:25:04,562 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/5ecb6b8309c9420e9716c157af900573 is 50, key is test_row_0/C:col10/1732141503624/Put/seqid=0 2024-11-20T22:25:04,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742206_1382 (size=12151) 2024-11-20T22:25:04,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T22:25:04,669 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:04,670 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T22:25:04,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:04,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
as already flushing 2024-11-20T22:25:04,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:04,670 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,681 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141564673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:04,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141564674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:04,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141564682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:04,824 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:04,824 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T22:25:04,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:04,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:04,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:04,825 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,980 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:04,981 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T22:25:04,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:04,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:04,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:04,981 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
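The RegionTooBusyException entries above are the region server refusing new Puts because the region's memstore has grown past its blocking limit while a flush is still in progress; that limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and the 512.0 K figure here suggests the test configures a deliberately small flush size. The exception reaches the client as an IOException, so callers are expected to back off and retry. Below is a minimal, hypothetical client-side sketch (standard HBase 2.x client API; the table, row, family and qualifier names are taken from the log, and the retry policy is illustrative only):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // RegionTooBusyException is thrown while the memstore is above
      // flush.size * block.multiplier; back off and retry instead of failing hard.
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) throw e;     // give up after a few tries (illustrative)
          Thread.sleep(100L * attempt);  // simple linear backoff
        }
      }
    }
  }
}
```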
2024-11-20T22:25:04,999 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/5ecb6b8309c9420e9716c157af900573 2024-11-20T22:25:05,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/968be4df223d4e0188c225f6b61adcbe as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/968be4df223d4e0188c225f6b61adcbe 2024-11-20T22:25:05,046 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/968be4df223d4e0188c225f6b61adcbe, entries=150, sequenceid=209, filesize=30.4 K 2024-11-20T22:25:05,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/2667fe0ca6344b9bb364879e2d4d07a0 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2667fe0ca6344b9bb364879e2d4d07a0 2024-11-20T22:25:05,052 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2667fe0ca6344b9bb364879e2d4d07a0, entries=150, sequenceid=209, filesize=11.9 K 2024-11-20T22:25:05,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/5ecb6b8309c9420e9716c157af900573 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/5ecb6b8309c9420e9716c157af900573 2024-11-20T22:25:05,062 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/5ecb6b8309c9420e9716c157af900573, entries=150, sequenceid=209, filesize=11.9 K 2024-11-20T22:25:05,065 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for bcb32e1c322c877c5cddb0d2b8bcab6a in 1117ms, sequenceid=209, compaction requested=true 2024-11-20T22:25:05,065 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:05,065 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:25:05,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:A, priority=-2147483648, 
current under compaction store size is 1 2024-11-20T22:25:05,066 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 124762 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:25:05,066 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/A is initiating minor compaction (all files) 2024-11-20T22:25:05,066 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/A in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:05,066 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/8cf62680bb864d11866610c66e05a173, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/226f3eaebde345a1b0e4dcfb6b41e69c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/8f56c74831de4b6682b4862ba21728bb, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/968be4df223d4e0188c225f6b61adcbe] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=121.8 K 2024-11-20T22:25:05,067 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:05,067 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/8cf62680bb864d11866610c66e05a173, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/226f3eaebde345a1b0e4dcfb6b41e69c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/8f56c74831de4b6682b4862ba21728bb, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/968be4df223d4e0188c225f6b61adcbe] 2024-11-20T22:25:05,067 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:05,067 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:25:05,067 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8cf62680bb864d11866610c66e05a173, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732141498906 2024-11-20T22:25:05,068 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 226f3eaebde345a1b0e4dcfb6b41e69c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732141500131 2024-11-20T22:25:05,068 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f56c74831de4b6682b4862ba21728bb, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732141501405 2024-11-20T22:25:05,068 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:25:05,068 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/B is initiating minor compaction (all files) 2024-11-20T22:25:05,068 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/B in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
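The compaction entries above show the ExploringCompactionPolicy picking all four eligible HFiles of store A (and then B) for a system-requested minor compaction right after the flush. The same work can be requested explicitly through the Admin API; the sketch below assumes the standard HBase 2.x Admin interface and only mirrors what the flusher triggered automatically here:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompactionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Ask for a compaction of the 'A' family, like the system-selected one above.
      admin.compact(table, Bytes.toBytes("A"));
      // Compactions run asynchronously on the region server; polling the
      // compaction state is one (illustrative) way to wait for completion.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(500);
      }
    }
  }
}
```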
2024-11-20T22:25:05,069 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/e21c58822f9643888c4656b2c4b75240, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/6b6344107640488a81c9de6953085978, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/ed8f2ae29c5d400fb7b337f3a5e21913, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2667fe0ca6344b9bb364879e2d4d07a0] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=47.8 K 2024-11-20T22:25:05,069 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting e21c58822f9643888c4656b2c4b75240, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732141498906 2024-11-20T22:25:05,069 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 968be4df223d4e0188c225f6b61adcbe, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732141503614 2024-11-20T22:25:05,070 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b6344107640488a81c9de6953085978, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732141500131 2024-11-20T22:25:05,070 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting ed8f2ae29c5d400fb7b337f3a5e21913, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732141501405 2024-11-20T22:25:05,071 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 2667fe0ca6344b9bb364879e2d4d07a0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732141503614 2024-11-20T22:25:05,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:05,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:05,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:05,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:05,094 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 
2024-11-20T22:25:05,113 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#B#compaction#319 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:05,114 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/c2166be2703d40a49bd734e94deaf990 is 50, key is test_row_0/B:col10/1732141503624/Put/seqid=0 2024-11-20T22:25:05,130 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411206e8da4ed586646689fab378ee06d3f2d_bcb32e1c322c877c5cddb0d2b8bcab6a store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:05,133 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411206e8da4ed586646689fab378ee06d3f2d_bcb32e1c322c877c5cddb0d2b8bcab6a, store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:05,133 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206e8da4ed586646689fab378ee06d3f2d_bcb32e1c322c877c5cddb0d2b8bcab6a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:05,134 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:05,135 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T22:25:05,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:25:05,135 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T22:25:05,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:25:05,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:05,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:25:05,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:05,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 2024-11-20T22:25:05,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:05,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742207_1383 (size=12629) 2024-11-20T22:25:05,166 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/c2166be2703d40a49bd734e94deaf990 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/c2166be2703d40a49bd734e94deaf990 2024-11-20T22:25:05,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T22:25:05,171 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/B of bcb32e1c322c877c5cddb0d2b8bcab6a into c2166be2703d40a49bd734e94deaf990(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
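The pid=111 entries show the dispatched flush procedure finally making progress once the earlier flush has completed: all three column families of the region are moved out of the CompactingMemStore pipeline into new store files. A client-initiated equivalent is Admin.flush; the sketch below is a minimal example under that assumption (in recent HBase versions the call is carried out by master-side procedures much like the ones logged above, though the exact dispatch path depends on the version):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks HBase to flush every region of the table; the per-region flush work
      // then runs on the region servers, as in the FlushRegionCallable entries above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```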
2024-11-20T22:25:05,171 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:05,171 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/B, priority=12, startTime=1732141505067; duration=0sec 2024-11-20T22:25:05,171 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:05,171 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:B 2024-11-20T22:25:05,171 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:25:05,172 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:25:05,172 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/C is initiating minor compaction (all files) 2024-11-20T22:25:05,173 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/C in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:05,173 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/dbaca0becd2e4a098b6ed6bb3cf017ce, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/64a0e14971384c06a367f8354026516d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/1dccf5d3cb264456a524b55988a4b765, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/5ecb6b8309c9420e9716c157af900573] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=47.8 K 2024-11-20T22:25:05,173 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting dbaca0becd2e4a098b6ed6bb3cf017ce, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732141498906 2024-11-20T22:25:05,173 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 64a0e14971384c06a367f8354026516d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732141500131 2024-11-20T22:25:05,174 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 1dccf5d3cb264456a524b55988a4b765, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=196, earliestPutTs=1732141501405 2024-11-20T22:25:05,174 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ecb6b8309c9420e9716c157af900573, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732141503614 2024-11-20T22:25:05,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742208_1384 (size=4469) 2024-11-20T22:25:05,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:05,205 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:05,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201225a9baf07d41ffad3746e083d722d7_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141504022/Put/seqid=0 2024-11-20T22:25:05,235 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#C#compaction#321 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:05,235 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/388c82c086b24d618c2604a0a1e40a6b is 50, key is test_row_0/C:col10/1732141503624/Put/seqid=0 2024-11-20T22:25:05,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742209_1385 (size=12304) 2024-11-20T22:25:05,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:05,254 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201225a9baf07d41ffad3746e083d722d7_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201225a9baf07d41ffad3746e083d722d7_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:05,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/05fb18cfffbb476ba02f9e95b1d02fd2, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 
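Family A of this table is MOB-enabled, which is why its flushes and compactions go through DefaultMobStoreFlusher/DefaultMobStoreCompactor and write files under the mobdir path seen above, while B and C are ordinary families. The actual TestAcidGuarantees table setup is not part of this log; the sketch below is a hypothetical illustration of how such a schema could be declared with the HBase 2.x descriptor builders (the 4 KB MOB threshold is an assumption, not taken from the test):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Family 'A' stores values above the threshold as MOB files (written under the
      // mob directory, as in the DefaultMobStoreFlusher entries above); 'B' and 'C'
      // are plain families.
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                  .setMobEnabled(true)
                  .setMobThreshold(4 * 1024)  // hypothetical threshold in bytes
                  .build())
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
      admin.createTable(table.build());
    }
  }
}
```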
2024-11-20T22:25:05,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/05fb18cfffbb476ba02f9e95b1d02fd2 is 175, key is test_row_0/A:col10/1732141504022/Put/seqid=0 2024-11-20T22:25:05,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:05,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141565254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:05,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:05,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141565256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:05,276 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:05,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141565262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:05,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742210_1386 (size=12629) 2024-11-20T22:25:05,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742211_1387 (size=31105) 2024-11-20T22:25:05,319 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=232, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/05fb18cfffbb476ba02f9e95b1d02fd2 2024-11-20T22:25:05,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/031260e43d4d4592b4188e9750d4181f is 50, key is test_row_0/B:col10/1732141504022/Put/seqid=0 2024-11-20T22:25:05,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:05,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141565364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:05,380 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:05,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141565369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:05,382 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:05,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141565377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:05,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742212_1388 (size=12151) 2024-11-20T22:25:05,576 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:05,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141565573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:05,587 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:05,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141565582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:05,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:05,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141565585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:05,611 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#A#compaction#318 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:05,612 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/36a51329e1184c32800e997a93d25135 is 175, key is test_row_0/A:col10/1732141503624/Put/seqid=0 2024-11-20T22:25:05,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742213_1389 (size=31583) 2024-11-20T22:25:05,658 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/36a51329e1184c32800e997a93d25135 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/36a51329e1184c32800e997a93d25135 2024-11-20T22:25:05,667 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/A of bcb32e1c322c877c5cddb0d2b8bcab6a into 36a51329e1184c32800e997a93d25135(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
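The PressureAwareThroughputController record above reports an average compaction throughput of 0.05 MB/second against a total limit of 50.00 MB/second. That limit is tunable on the region server; the following is only a minimal sketch, assuming the standard pressure-aware throughput bound properties, with illustrative values that are not taken from this test run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Bounds (bytes/second) used by the pressure-aware compaction throughput
        // controller; the values here are illustrative, not this cluster's settings.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // ~100 MB/s
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // ~50 MB/s
        System.out.println(conf.get("hbase.hstore.compaction.throughput.lower.bound"));
      }
    }

Under low flush pressure the controller throttles toward the lower bound, which is consistent with the 50.00 MB/second total limit logged above.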
2024-11-20T22:25:05,667 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:05,667 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/A, priority=12, startTime=1732141505065; duration=0sec 2024-11-20T22:25:05,667 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:05,667 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:A 2024-11-20T22:25:05,701 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/388c82c086b24d618c2604a0a1e40a6b as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/388c82c086b24d618c2604a0a1e40a6b 2024-11-20T22:25:05,711 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/C of bcb32e1c322c877c5cddb0d2b8bcab6a into 388c82c086b24d618c2604a0a1e40a6b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:05,712 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:05,712 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/C, priority=12, startTime=1732141505074; duration=0sec 2024-11-20T22:25:05,713 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:05,713 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:C 2024-11-20T22:25:05,790 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/031260e43d4d4592b4188e9750d4181f 2024-11-20T22:25:05,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/ed71e2f16e9e49c59bc2398a0feb313d is 50, key is test_row_0/C:col10/1732141504022/Put/seqid=0 2024-11-20T22:25:05,823 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742214_1390 (size=12151) 2024-11-20T22:25:05,824 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/ed71e2f16e9e49c59bc2398a0feb313d 2024-11-20T22:25:05,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/05fb18cfffbb476ba02f9e95b1d02fd2 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/05fb18cfffbb476ba02f9e95b1d02fd2 2024-11-20T22:25:05,846 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/05fb18cfffbb476ba02f9e95b1d02fd2, entries=150, sequenceid=232, filesize=30.4 K 2024-11-20T22:25:05,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/031260e43d4d4592b4188e9750d4181f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/031260e43d4d4592b4188e9750d4181f 2024-11-20T22:25:05,853 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/031260e43d4d4592b4188e9750d4181f, entries=150, sequenceid=232, filesize=11.9 K 2024-11-20T22:25:05,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/ed71e2f16e9e49c59bc2398a0feb313d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/ed71e2f16e9e49c59bc2398a0feb313d 2024-11-20T22:25:05,857 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/ed71e2f16e9e49c59bc2398a0feb313d, entries=150, sequenceid=232, filesize=11.9 K 2024-11-20T22:25:05,858 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 
KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for bcb32e1c322c877c5cddb0d2b8bcab6a in 723ms, sequenceid=232, compaction requested=false 2024-11-20T22:25:05,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:05,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:05,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-11-20T22:25:05,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-11-20T22:25:05,863 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-11-20T22:25:05,863 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8090 sec 2024-11-20T22:25:05,865 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.8150 sec 2024-11-20T22:25:05,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:05,883 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T22:25:05,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:25:05,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:05,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:25:05,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:05,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 2024-11-20T22:25:05,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:05,893 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202f629eca87eb4c85906a46f11d1f7a8a_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141505260/Put/seqid=0 2024-11-20T22:25:05,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742215_1391 (size=14794) 2024-11-20T22:25:05,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:05,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141565950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:05,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:05,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141565951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:05,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:05,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141565952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:06,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:06,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141566054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:06,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:06,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141566059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:06,066 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:06,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141566061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:06,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T22:25:06,178 INFO [Thread-1494 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-20T22:25:06,182 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:06,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-11-20T22:25:06,185 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:06,185 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:06,185 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:06,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T22:25:06,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:06,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141566261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:06,270 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:06,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141566265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:06,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:06,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141566267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:06,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T22:25:06,309 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:06,316 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202f629eca87eb4c85906a46f11d1f7a8a_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202f629eca87eb4c85906a46f11d1f7a8a_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:06,317 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/831b0c5b9148478a94ad00b197358f38, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:06,317 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/831b0c5b9148478a94ad00b197358f38 is 175, key is test_row_0/A:col10/1732141505260/Put/seqid=0 2024-11-20T22:25:06,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742216_1392 (size=39749) 2024-11-20T22:25:06,327 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=249, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/831b0c5b9148478a94ad00b197358f38 2024-11-20T22:25:06,339 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:06,339 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T22:25:06,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:06,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:06,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:06,340 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:06,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
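The flushes above go through DefaultMobStoreFlusher and CompactingMemStore, which suggests the table's A family is MOB-enabled and the stores use in-memory compaction. A minimal sketch of declaring such a family when creating a table follows; the MOB threshold is a made-up illustrative value, not the one used by this test.

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      // Builds a descriptor for a table with one MOB-enabled family "A".
      public static TableDescriptor descriptor() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)                                  // large cells go to MOB files under /mobdir
                .setMobThreshold(1024)                                // bytes; illustrative threshold only
                .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)  // backs the store with a CompactingMemStore
                .build())
            .build();
      }
    }

An Admin obtained from the same Connection could then pass this descriptor to createTable; the MOB rename into the mobdir path seen above is what such a family produces at flush time.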
2024-11-20T22:25:06,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
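The FlushTableProcedure entries above (pid=110 and pid=112) were started by an explicit client request, visible as "Client=jenkins//172.17.0.2 flush TestAcidGuarantees". A minimal sketch of issuing the same request through the public Admin API; the connection settings are assumed, not taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to run a flush procedure for the table, like pid=112 above;
          // the client then polls until the procedure is reported done.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

The "Checking to see if procedure is done pid=112" records above are that polling, and the "NOT flushing ... as already flushing" errors show the region server declining the remote flush because a memstore flush is still in progress, which the master simply retries.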
2024-11-20T22:25:06,348 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/8a8e0caa5ea14ae1b8fcbc0c40e254f0 is 50, key is test_row_0/B:col10/1732141505260/Put/seqid=0 2024-11-20T22:25:06,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742217_1393 (size=12151) 2024-11-20T22:25:06,492 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:06,493 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T22:25:06,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:06,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:06,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:06,493 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:06,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:06,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T22:25:06,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
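Every rejected Mutate above cites the same figure, "Over memstore limit=512.0 K". HRegion.checkResources blocks updates once a region's memstore exceeds its flush size multiplied by a blocking multiplier, and the tiny 512 K limit here presumably comes from the deliberately small flush size this test configures. A minimal configuration sketch with ordinary production-scale values (illustrative, not this cluster's settings):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore size that triggers a flush (128 MB is the usual default).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Updates are rejected with RegionTooBusyException once the memstore reaches
        // flush.size * block.multiplier (512 MB with these example values).
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("blocking limit (bytes): " + blockingLimit);
      }
    }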
2024-11-20T22:25:06,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:06,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141566573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:06,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:06,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141566575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:06,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:06,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141566579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:06,648 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:06,648 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T22:25:06,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:25:06,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:06,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:06,649 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:06,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:06,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:06,758 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/8a8e0caa5ea14ae1b8fcbc0c40e254f0 2024-11-20T22:25:06,796 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/4477c274c1544af197e805857caec6b0 is 50, key is test_row_0/C:col10/1732141505260/Put/seqid=0 2024-11-20T22:25:06,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T22:25:06,801 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:06,801 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T22:25:06,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:06,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:06,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:06,801 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:06,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:06,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:06,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742218_1394 (size=12151) 2024-11-20T22:25:06,955 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:06,955 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T22:25:06,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:06,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:06,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:06,955 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:06,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:06,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:07,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:07,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141567084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:07,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:07,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141567085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:07,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:07,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141567086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:07,108 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:07,108 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T22:25:07,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:25:07,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:07,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:07,109 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:07,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:07,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:07,217 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/4477c274c1544af197e805857caec6b0 2024-11-20T22:25:07,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/831b0c5b9148478a94ad00b197358f38 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/831b0c5b9148478a94ad00b197358f38 2024-11-20T22:25:07,225 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/831b0c5b9148478a94ad00b197358f38, entries=200, sequenceid=249, filesize=38.8 K 2024-11-20T22:25:07,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/8a8e0caa5ea14ae1b8fcbc0c40e254f0 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/8a8e0caa5ea14ae1b8fcbc0c40e254f0 2024-11-20T22:25:07,229 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/8a8e0caa5ea14ae1b8fcbc0c40e254f0, entries=150, sequenceid=249, filesize=11.9 K 2024-11-20T22:25:07,229 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/4477c274c1544af197e805857caec6b0 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/4477c274c1544af197e805857caec6b0 2024-11-20T22:25:07,233 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/4477c274c1544af197e805857caec6b0, entries=150, sequenceid=249, filesize=11.9 K 2024-11-20T22:25:07,233 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for bcb32e1c322c877c5cddb0d2b8bcab6a in 1350ms, sequenceid=249, compaction requested=true 2024-11-20T22:25:07,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:07,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:07,234 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 
0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:07,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:07,234 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102437 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:07,234 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:07,235 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/A is initiating minor compaction (all files) 2024-11-20T22:25:07,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:07,235 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/A in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:07,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:07,235 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/36a51329e1184c32800e997a93d25135, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/05fb18cfffbb476ba02f9e95b1d02fd2, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/831b0c5b9148478a94ad00b197358f38] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=100.0 K 2024-11-20T22:25:07,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:07,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:07,235 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:07,235 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/36a51329e1184c32800e997a93d25135, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/05fb18cfffbb476ba02f9e95b1d02fd2, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/831b0c5b9148478a94ad00b197358f38] 2024-11-20T22:25:07,235 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 36a51329e1184c32800e997a93d25135, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732141503614 2024-11-20T22:25:07,235 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 05fb18cfffbb476ba02f9e95b1d02fd2, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1732141504018 2024-11-20T22:25:07,236 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:07,236 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/B is initiating minor compaction (all files) 2024-11-20T22:25:07,236 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/B in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:07,236 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/c2166be2703d40a49bd734e94deaf990, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/031260e43d4d4592b4188e9750d4181f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/8a8e0caa5ea14ae1b8fcbc0c40e254f0] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=36.1 K 2024-11-20T22:25:07,236 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 831b0c5b9148478a94ad00b197358f38, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732141505250 2024-11-20T22:25:07,236 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting c2166be2703d40a49bd734e94deaf990, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732141503614 2024-11-20T22:25:07,236 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 031260e43d4d4592b4188e9750d4181f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1732141504018 2024-11-20T22:25:07,236 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 
8a8e0caa5ea14ae1b8fcbc0c40e254f0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732141505250 2024-11-20T22:25:07,240 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:07,242 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112056351ec79a6e4787a95051b0a3c67c96_bcb32e1c322c877c5cddb0d2b8bcab6a store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:07,242 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#B#compaction#328 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:07,242 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/df196f56af5d44888ceebee93e2584c0 is 50, key is test_row_0/B:col10/1732141505260/Put/seqid=0 2024-11-20T22:25:07,244 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112056351ec79a6e4787a95051b0a3c67c96_bcb32e1c322c877c5cddb0d2b8bcab6a, store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:07,244 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112056351ec79a6e4787a95051b0a3c67c96_bcb32e1c322c877c5cddb0d2b8bcab6a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:07,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742219_1395 (size=12731) 2024-11-20T22:25:07,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742220_1396 (size=4469) 2024-11-20T22:25:07,250 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#A#compaction#327 average throughput is 2.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:07,251 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/fb59bf2ba9b549a18968689868fa48a3 is 175, key is test_row_0/A:col10/1732141505260/Put/seqid=0 2024-11-20T22:25:07,260 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:07,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742221_1397 (size=31685) 2024-11-20T22:25:07,261 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T22:25:07,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:07,261 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T22:25:07,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:25:07,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:07,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:25:07,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:07,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 2024-11-20T22:25:07,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:07,275 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/fb59bf2ba9b549a18968689868fa48a3 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/fb59bf2ba9b549a18968689868fa48a3 2024-11-20T22:25:07,286 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/A of bcb32e1c322c877c5cddb0d2b8bcab6a into 
fb59bf2ba9b549a18968689868fa48a3(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:07,286 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:07,286 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/A, priority=13, startTime=1732141507233; duration=0sec 2024-11-20T22:25:07,286 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:07,286 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:A 2024-11-20T22:25:07,286 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:07,289 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:07,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ce6f9795f175412094550a2d736466f1_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141505937/Put/seqid=0 2024-11-20T22:25:07,289 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/C is initiating minor compaction (all files) 2024-11-20T22:25:07,289 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/C in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:25:07,289 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/388c82c086b24d618c2604a0a1e40a6b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/ed71e2f16e9e49c59bc2398a0feb313d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/4477c274c1544af197e805857caec6b0] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=36.1 K 2024-11-20T22:25:07,289 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 388c82c086b24d618c2604a0a1e40a6b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732141503614 2024-11-20T22:25:07,290 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed71e2f16e9e49c59bc2398a0feb313d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1732141504018 2024-11-20T22:25:07,290 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4477c274c1544af197e805857caec6b0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732141505250 2024-11-20T22:25:07,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T22:25:07,304 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#C#compaction#330 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:07,305 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/587d88b097024d5c8cb7962a4adadab5 is 50, key is test_row_0/C:col10/1732141505260/Put/seqid=0 2024-11-20T22:25:07,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742222_1398 (size=12454) 2024-11-20T22:25:07,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742223_1399 (size=12731) 2024-11-20T22:25:07,326 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/587d88b097024d5c8cb7962a4adadab5 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/587d88b097024d5c8cb7962a4adadab5 2024-11-20T22:25:07,345 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/C of bcb32e1c322c877c5cddb0d2b8bcab6a into 587d88b097024d5c8cb7962a4adadab5(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:07,345 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:07,345 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/C, priority=13, startTime=1732141507235; duration=0sec 2024-11-20T22:25:07,345 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:07,345 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:C 2024-11-20T22:25:07,667 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/df196f56af5d44888ceebee93e2584c0 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/df196f56af5d44888ceebee93e2584c0 2024-11-20T22:25:07,675 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/B of bcb32e1c322c877c5cddb0d2b8bcab6a into df196f56af5d44888ceebee93e2584c0(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:07,675 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a:
2024-11-20T22:25:07,675 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/B, priority=13, startTime=1732141507234; duration=0sec
2024-11-20T22:25:07,675 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T22:25:07,675 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:B
2024-11-20T22:25:07,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:07,712 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ce6f9795f175412094550a2d736466f1_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ce6f9795f175412094550a2d736466f1_bcb32e1c322c877c5cddb0d2b8bcab6a
2024-11-20T22:25:07,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/fca5d3d3d2954cb88629fa0fd296e519, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a]
2024-11-20T22:25:07,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/fca5d3d3d2954cb88629fa0fd296e519 is 175, key is test_row_0/A:col10/1732141505937/Put/seqid=0
2024-11-20T22:25:07,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742224_1400 (size=31255)
2024-11-20T22:25:07,733 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=272, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/fca5d3d3d2954cb88629fa0fd296e519
2024-11-20T22:25:07,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/bc5c59d280564f20b9d006643ac06756 is 50, key is test_row_0/B:col10/1732141505937/Put/seqid=0
2024-11-20T22:25:07,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742225_1401 (size=12301)
2024-11-20T22:25:07,774 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/bc5c59d280564f20b9d006643ac06756
2024-11-20T22:25:07,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/6bcc6ec955434ac385a2aaf6b124ebda is 50, key is test_row_0/C:col10/1732141505937/Put/seqid=0
2024-11-20T22:25:07,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742226_1402 (size=12301)
2024-11-20T22:25:07,801 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/6bcc6ec955434ac385a2aaf6b124ebda
2024-11-20T22:25:07,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/fca5d3d3d2954cb88629fa0fd296e519 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/fca5d3d3d2954cb88629fa0fd296e519
2024-11-20T22:25:07,814 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/fca5d3d3d2954cb88629fa0fd296e519, entries=150, sequenceid=272, filesize=30.5 K
2024-11-20T22:25:07,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/bc5c59d280564f20b9d006643ac06756 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/bc5c59d280564f20b9d006643ac06756
2024-11-20T22:25:07,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:07,817 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/bc5c59d280564f20b9d006643ac06756, entries=150, sequenceid=272, filesize=12.0 K
2024-11-20T22:25:07,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/6bcc6ec955434ac385a2aaf6b124ebda as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/6bcc6ec955434ac385a2aaf6b124ebda
2024-11-20T22:25:07,823 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/6bcc6ec955434ac385a2aaf6b124ebda, entries=150, sequenceid=272, filesize=12.0 K
2024-11-20T22:25:07,824 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for bcb32e1c322c877c5cddb0d2b8bcab6a in 563ms, sequenceid=272, compaction requested=false
2024-11-20T22:25:07,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for bcb32e1c322c877c5cddb0d2b8bcab6a:
2024-11-20T22:25:07,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.
2024-11-20T22:25:07,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113
2024-11-20T22:25:07,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=113
2024-11-20T22:25:07,826 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112
2024-11-20T22:25:07,827 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6400 sec
2024-11-20T22:25:07,828 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 1.6450 sec
2024-11-20T22:25:07,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:07,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG entry, "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker", repeats continuously from RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 (queue=0, port=44631) between 2024-11-20T22:25:07,895 and 2024-11-20T22:25:07,961; only the timestamp and handler id vary between entries ...]
2024-11-20T22:25:07,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:07,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... same DEBUG entry ("storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker") logged repeatedly by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 44631 between 2024-11-20T22:25:08,167 and 2024-11-20T22:25:08,217; duplicate entries collapsed ...]
2024-11-20T22:25:08,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:08,238 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T22:25:08,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:25:08,239 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:08,239 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:25:08,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,239 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:08,239 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 2024-11-20T22:25:08,239 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:08,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,252 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112044a830d7b97b4f30b29d72419688ecb3_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141508223/Put/seqid=0 2024-11-20T22:25:08,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,254 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742227_1403 (size=22618) 2024-11-20T22:25:08,292 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T22:25:08,297 INFO [Thread-1494 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-11-20T22:25:08,301 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:08,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-11-20T22:25:08,303 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:08,303 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=114, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:08,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T22:25:08,303 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:08,308 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112044a830d7b97b4f30b29d72419688ecb3_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112044a830d7b97b4f30b29d72419688ecb3_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:08,311 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/40cb7493acb24539b837cc3df02db019, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:08,312 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/40cb7493acb24539b837cc3df02db019 is 175, key is test_row_0/A:col10/1732141508223/Put/seqid=0 2024-11-20T22:25:08,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742228_1404 (size=66023) 2024-11-20T22:25:08,367 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=287, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/40cb7493acb24539b837cc3df02db019 2024-11-20T22:25:08,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:08,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141568366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:08,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:08,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141568366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:08,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:08,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141568366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:08,386 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/861b7aab5657453bb4e307909b49f677 is 50, key is test_row_0/B:col10/1732141508223/Put/seqid=0 2024-11-20T22:25:08,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742229_1405 (size=12301) 2024-11-20T22:25:08,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T22:25:08,405 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/861b7aab5657453bb4e307909b49f677 2024-11-20T22:25:08,425 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/24dcffa52bd74b90808454e9d0c7ab85 is 50, key is test_row_0/C:col10/1732141508223/Put/seqid=0 2024-11-20T22:25:08,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742230_1406 (size=12301) 2024-11-20T22:25:08,437 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/24dcffa52bd74b90808454e9d0c7ab85 2024-11-20T22:25:08,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/40cb7493acb24539b837cc3df02db019 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/40cb7493acb24539b837cc3df02db019 2024-11-20T22:25:08,446 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/40cb7493acb24539b837cc3df02db019, entries=350, sequenceid=287, filesize=64.5 K 2024-11-20T22:25:08,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/861b7aab5657453bb4e307909b49f677 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/861b7aab5657453bb4e307909b49f677 2024-11-20T22:25:08,450 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/861b7aab5657453bb4e307909b49f677, entries=150, sequenceid=287, filesize=12.0 K 2024-11-20T22:25:08,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/24dcffa52bd74b90808454e9d0c7ab85 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/24dcffa52bd74b90808454e9d0c7ab85 2024-11-20T22:25:08,455 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/24dcffa52bd74b90808454e9d0c7ab85, entries=150, sequenceid=287, filesize=12.0 K 2024-11-20T22:25:08,456 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for bcb32e1c322c877c5cddb0d2b8bcab6a in 218ms, sequenceid=287, compaction requested=true 2024-11-20T22:25:08,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:08,456 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:08,456 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:08,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:08,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:08,457 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T22:25:08,457 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:08,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:08,457 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T22:25:08,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:25:08,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:08,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:25:08,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:08,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 2024-11-20T22:25:08,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:08,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:08,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:08,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:08,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:08,459 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:08,459 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/B is initiating minor compaction (all files) 2024-11-20T22:25:08,460 INFO 
[RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/B in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:08,460 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/df196f56af5d44888ceebee93e2584c0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/bc5c59d280564f20b9d006643ac06756, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/861b7aab5657453bb4e307909b49f677] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=36.5 K 2024-11-20T22:25:08,460 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128963 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:08,460 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/A is initiating minor compaction (all files) 2024-11-20T22:25:08,460 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/A in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:08,460 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/fb59bf2ba9b549a18968689868fa48a3, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/fca5d3d3d2954cb88629fa0fd296e519, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/40cb7493acb24539b837cc3df02db019] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=125.9 K 2024-11-20T22:25:08,460 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:08,460 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/fb59bf2ba9b549a18968689868fa48a3, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/fca5d3d3d2954cb88629fa0fd296e519, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/40cb7493acb24539b837cc3df02db019] 2024-11-20T22:25:08,461 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting df196f56af5d44888ceebee93e2584c0, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732141505250 2024-11-20T22:25:08,461 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb59bf2ba9b549a18968689868fa48a3, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732141505250 2024-11-20T22:25:08,462 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting fca5d3d3d2954cb88629fa0fd296e519, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1732141505937 2024-11-20T22:25:08,462 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting bc5c59d280564f20b9d006643ac06756, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1732141505937 2024-11-20T22:25:08,462 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 861b7aab5657453bb4e307909b49f677, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732141508223 2024-11-20T22:25:08,464 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 40cb7493acb24539b837cc3df02db019, keycount=350, bloomtype=ROW, size=64.5 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732141508174 2024-11-20T22:25:08,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120690b3f0a910246af97f2312ff943daeb_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141508341/Put/seqid=0 2024-11-20T22:25:08,481 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#B#compaction#337 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:08,481 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/493e554f511a41dc9331c017a5e1def4 is 50, key is test_row_0/B:col10/1732141508223/Put/seqid=0 2024-11-20T22:25:08,490 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
as already flushing 2024-11-20T22:25:08,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:08,491 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:08,505 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120e174ebe900434b20a2cd5dc4438937cd_bcb32e1c322c877c5cddb0d2b8bcab6a store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:08,507 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120e174ebe900434b20a2cd5dc4438937cd_bcb32e1c322c877c5cddb0d2b8bcab6a, store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:08,507 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e174ebe900434b20a2cd5dc4438937cd_bcb32e1c322c877c5cddb0d2b8bcab6a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:08,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742231_1407 (size=12454) 2024-11-20T22:25:08,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:08,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141568503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:08,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:08,513 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:08,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141568504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:08,514 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120690b3f0a910246af97f2312ff943daeb_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120690b3f0a910246af97f2312ff943daeb_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:08,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/e8a93f3ba57b461483daa80e56e7508a, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:08,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/e8a93f3ba57b461483daa80e56e7508a is 175, key is test_row_0/A:col10/1732141508341/Put/seqid=0 2024-11-20T22:25:08,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:08,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141568505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:08,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742232_1408 (size=12983) 2024-11-20T22:25:08,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742234_1410 (size=31255) 2024-11-20T22:25:08,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742233_1409 (size=4469) 2024-11-20T22:25:08,536 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#A#compaction#338 average throughput is 0.54 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:08,537 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/fa58ba0a6bb84894aeb90c94bdb525a2 is 175, key is test_row_0/A:col10/1732141508223/Put/seqid=0 2024-11-20T22:25:08,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742235_1411 (size=31937) 2024-11-20T22:25:08,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T22:25:08,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:08,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141568610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:08,624 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:08,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141568614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:08,624 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:08,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141568618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:08,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:08,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141568821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:08,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:08,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141568826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:08,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:08,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141568828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:08,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T22:25:08,925 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/493e554f511a41dc9331c017a5e1def4 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/493e554f511a41dc9331c017a5e1def4 2024-11-20T22:25:08,930 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/B of bcb32e1c322c877c5cddb0d2b8bcab6a into 493e554f511a41dc9331c017a5e1def4(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
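
[editor's note] The repeated RegionTooBusyException warnings above come from HRegion.checkResources() rejecting writes while the region's memstore is above its blocking limit, reported here as 512.0 K. That limit is the product of the configured memstore flush size and the block multiplier. The sketch below shows one way such a small limit could be set up in a test; the 128 KB flush size is an assumption made purely for illustration, since only the 512 K product is visible in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch only: a configuration that would yield the 512.0 K
// memstore blocking limit seen in the warnings above. The 128 KB flush size
// is an assumption; only the 512 K product (flush size x block multiplier)
// appears in the log.
public final class SmallMemstoreConfig {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush at ~128 KB (assumed)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4x = 512 KB
    return conf;
  }
}
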
2024-11-20T22:25:08,930 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:08,930 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/B, priority=13, startTime=1732141508457; duration=0sec 2024-11-20T22:25:08,930 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:08,930 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:B 2024-11-20T22:25:08,930 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:08,931 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:08,931 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/C is initiating minor compaction (all files) 2024-11-20T22:25:08,931 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/C in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:08,931 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/587d88b097024d5c8cb7962a4adadab5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/6bcc6ec955434ac385a2aaf6b124ebda, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/24dcffa52bd74b90808454e9d0c7ab85] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=36.5 K 2024-11-20T22:25:08,932 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 587d88b097024d5c8cb7962a4adadab5, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732141505250 2024-11-20T22:25:08,932 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 6bcc6ec955434ac385a2aaf6b124ebda, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1732141505937 2024-11-20T22:25:08,932 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=313, memsize=51.4 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/e8a93f3ba57b461483daa80e56e7508a 2024-11-20T22:25:08,932 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 24dcffa52bd74b90808454e9d0c7ab85, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732141508223 2024-11-20T22:25:08,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/2ef25070f5e64f108617fa984c6c84a8 is 50, key is test_row_0/B:col10/1732141508341/Put/seqid=0 2024-11-20T22:25:08,946 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#C#compaction#340 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:08,947 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/1a32703b4e6d41cdb460f5a1f04124f2 is 50, key is test_row_0/C:col10/1732141508223/Put/seqid=0 2024-11-20T22:25:08,950 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/fa58ba0a6bb84894aeb90c94bdb525a2 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/fa58ba0a6bb84894aeb90c94bdb525a2 2024-11-20T22:25:08,954 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/A of bcb32e1c322c877c5cddb0d2b8bcab6a into fa58ba0a6bb84894aeb90c94bdb525a2(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:08,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742236_1412 (size=12301) 2024-11-20T22:25:08,954 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:08,954 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/A, priority=13, startTime=1732141508456; duration=0sec 2024-11-20T22:25:08,954 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:08,954 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:A 2024-11-20T22:25:08,955 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/2ef25070f5e64f108617fa984c6c84a8 2024-11-20T22:25:08,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742237_1413 (size=12983) 2024-11-20T22:25:08,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/fbc18ab77f214742b8fba1ae0ec4a21c is 50, key is test_row_0/C:col10/1732141508341/Put/seqid=0 2024-11-20T22:25:08,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742238_1414 (size=12301) 2024-11-20T22:25:08,993 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/fbc18ab77f214742b8fba1ae0ec4a21c 2024-11-20T22:25:08,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/e8a93f3ba57b461483daa80e56e7508a as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/e8a93f3ba57b461483daa80e56e7508a 2024-11-20T22:25:09,004 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/e8a93f3ba57b461483daa80e56e7508a, entries=150, sequenceid=313, filesize=30.5 K 2024-11-20T22:25:09,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/2ef25070f5e64f108617fa984c6c84a8 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2ef25070f5e64f108617fa984c6c84a8 2024-11-20T22:25:09,011 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2ef25070f5e64f108617fa984c6c84a8, entries=150, sequenceid=313, filesize=12.0 K 2024-11-20T22:25:09,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/fbc18ab77f214742b8fba1ae0ec4a21c as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/fbc18ab77f214742b8fba1ae0ec4a21c 2024-11-20T22:25:09,018 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/fbc18ab77f214742b8fba1ae0ec4a21c, entries=150, sequenceid=313, filesize=12.0 K 2024-11-20T22:25:09,019 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for bcb32e1c322c877c5cddb0d2b8bcab6a in 562ms, sequenceid=313, compaction requested=false 2024-11-20T22:25:09,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:09,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
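
[editor's note] The flush just logged (~154.31 KB drained at sequenceid=313) is what relieves the memstore pressure behind the RegionTooBusyException rejections. From the client's side such rejections are normally absorbed by the HBase client's built-in retry logic; the sketch below is a hand-rolled, illustrative backoff loop using the public client API, with the table, row, and column names taken from this test and the explicit retry policy being an assumption, not the test's actual code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: retry a put that is rejected while the region's memstore
// is over its blocking limit. In practice the HBase client retries this
// internally; the explicit loop just makes the behaviour in the log concrete.
public final class BusyRegionRetryExample {
  public static void main(String[] args) throws IOException, InterruptedException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);            // succeeds once the flush has drained the memstore
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);   // back off, then try again
          backoffMs *= 2;
        }
      }
    }
  }
}
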
2024-11-20T22:25:09,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-11-20T22:25:09,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-11-20T22:25:09,022 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-20T22:25:09,022 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 718 msec 2024-11-20T22:25:09,023 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 721 msec 2024-11-20T22:25:09,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:09,133 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:25:09,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:25:09,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:09,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:25:09,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:09,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 2024-11-20T22:25:09,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:09,142 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112001dba192a8ca4f83801bfbc33e74cda4_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141508501/Put/seqid=0 2024-11-20T22:25:09,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742239_1415 (size=14994) 2024-11-20T22:25:09,160 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:09,164 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112001dba192a8ca4f83801bfbc33e74cda4_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112001dba192a8ca4f83801bfbc33e74cda4_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:09,164 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/15f49442c5c94636aed2d2d26543f811, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:09,165 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/15f49442c5c94636aed2d2d26543f811 is 175, key is test_row_0/A:col10/1732141508501/Put/seqid=0 2024-11-20T22:25:09,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742240_1416 (size=39949) 2024-11-20T22:25:09,177 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=326, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/15f49442c5c94636aed2d2d26543f811 2024-11-20T22:25:09,193 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/4e86339b12b149e1b100454b7e2a9096 is 50, key is test_row_0/B:col10/1732141508501/Put/seqid=0 2024-11-20T22:25:09,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742241_1417 (size=12301) 2024-11-20T22:25:09,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:09,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141569207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:09,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:09,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141569208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:09,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:09,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141569209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:09,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:09,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141569317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:09,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:09,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141569317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:09,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:09,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141569317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:09,391 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/1a32703b4e6d41cdb460f5a1f04124f2 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/1a32703b4e6d41cdb460f5a1f04124f2 2024-11-20T22:25:09,397 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/C of bcb32e1c322c877c5cddb0d2b8bcab6a into 1a32703b4e6d41cdb460f5a1f04124f2(size=12.7 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
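
[editor's note] The compaction entries above show ExploringCompactionPolicy selecting 3 eligible store files (with 16 files as the blocking threshold) and the pressure-aware throughput controller capping the merge at 50.00 MB/second. The snippet below lists the configuration keys behind the file-count numbers, set to their stock defaults purely for illustration; these values are not read from this test run's actual configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative: the knobs behind "3 eligible, 16 blocking" in the compaction
// selection logged above. Values shown are the usual defaults, not values
// taken from this test.
public final class CompactionSelectionDefaults {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);       // need at least 3 files before compacting
    conf.setInt("hbase.hstore.compaction.max", 10);      // compact at most 10 files per run
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);  // the "blocking" count reported in the selection log
    return conf;
  }
}
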
2024-11-20T22:25:09,397 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:09,397 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/C, priority=13, startTime=1732141508458; duration=0sec 2024-11-20T22:25:09,397 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:09,397 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:C 2024-11-20T22:25:09,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T22:25:09,407 INFO [Thread-1494 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-20T22:25:09,416 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:09,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-11-20T22:25:09,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T22:25:09,427 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:09,428 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:09,428 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:09,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T22:25:09,528 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:09,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141569522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:09,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:09,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141569526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:09,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:09,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141569527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:09,580 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:09,581 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-20T22:25:09,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:09,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:09,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:09,582 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:09,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:09,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:09,606 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/4e86339b12b149e1b100454b7e2a9096 2024-11-20T22:25:09,621 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/249a4a41e29042c388a20edc108ec8e9 is 50, key is test_row_0/C:col10/1732141508501/Put/seqid=0 2024-11-20T22:25:09,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742242_1418 (size=12301) 2024-11-20T22:25:09,639 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/249a4a41e29042c388a20edc108ec8e9 2024-11-20T22:25:09,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/15f49442c5c94636aed2d2d26543f811 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/15f49442c5c94636aed2d2d26543f811 2024-11-20T22:25:09,649 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/15f49442c5c94636aed2d2d26543f811, entries=200, sequenceid=326, filesize=39.0 K 2024-11-20T22:25:09,649 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/4e86339b12b149e1b100454b7e2a9096 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/4e86339b12b149e1b100454b7e2a9096 2024-11-20T22:25:09,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/4e86339b12b149e1b100454b7e2a9096, entries=150, sequenceid=326, filesize=12.0 K 
2024-11-20T22:25:09,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/249a4a41e29042c388a20edc108ec8e9 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/249a4a41e29042c388a20edc108ec8e9 2024-11-20T22:25:09,659 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/249a4a41e29042c388a20edc108ec8e9, entries=150, sequenceid=326, filesize=12.0 K 2024-11-20T22:25:09,660 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for bcb32e1c322c877c5cddb0d2b8bcab6a in 527ms, sequenceid=326, compaction requested=true 2024-11-20T22:25:09,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:09,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:09,660 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:09,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:09,661 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:09,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:09,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:09,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:09,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:09,661 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:09,661 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/A is initiating minor compaction (all files) 2024-11-20T22:25:09,661 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/A in 
TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:09,662 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/fa58ba0a6bb84894aeb90c94bdb525a2, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/e8a93f3ba57b461483daa80e56e7508a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/15f49442c5c94636aed2d2d26543f811] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=100.7 K 2024-11-20T22:25:09,662 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:09,662 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/fa58ba0a6bb84894aeb90c94bdb525a2, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/e8a93f3ba57b461483daa80e56e7508a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/15f49442c5c94636aed2d2d26543f811] 2024-11-20T22:25:09,663 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:09,663 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa58ba0a6bb84894aeb90c94bdb525a2, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732141508223 2024-11-20T22:25:09,663 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/B is initiating minor compaction (all files) 2024-11-20T22:25:09,663 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/B in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:25:09,663 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/493e554f511a41dc9331c017a5e1def4, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2ef25070f5e64f108617fa984c6c84a8, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/4e86339b12b149e1b100454b7e2a9096] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=36.7 K 2024-11-20T22:25:09,663 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8a93f3ba57b461483daa80e56e7508a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1732141508340 2024-11-20T22:25:09,663 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 493e554f511a41dc9331c017a5e1def4, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732141508223 2024-11-20T22:25:09,663 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15f49442c5c94636aed2d2d26543f811, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732141508493 2024-11-20T22:25:09,664 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ef25070f5e64f108617fa984c6c84a8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1732141508340 2024-11-20T22:25:09,664 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e86339b12b149e1b100454b7e2a9096, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732141508493 2024-11-20T22:25:09,671 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:09,676 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#B#compaction#346 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:09,677 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411200ca1aa700abb4352ae63546a3962cda5_bcb32e1c322c877c5cddb0d2b8bcab6a store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:09,678 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/6c68f692c832415aafc74c39ac81609f is 50, key is test_row_0/B:col10/1732141508501/Put/seqid=0 2024-11-20T22:25:09,680 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411200ca1aa700abb4352ae63546a3962cda5_bcb32e1c322c877c5cddb0d2b8bcab6a, store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:09,680 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200ca1aa700abb4352ae63546a3962cda5_bcb32e1c322c877c5cddb0d2b8bcab6a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:09,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742244_1420 (size=4469) 2024-11-20T22:25:09,694 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#A#compaction#345 average throughput is 1.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:09,695 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/129d4bfba0964afebb92e0ea22c4f225 is 175, key is test_row_0/A:col10/1732141508501/Put/seqid=0 2024-11-20T22:25:09,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742243_1419 (size=13085) 2024-11-20T22:25:09,710 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/6c68f692c832415aafc74c39ac81609f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/6c68f692c832415aafc74c39ac81609f 2024-11-20T22:25:09,719 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/B of bcb32e1c322c877c5cddb0d2b8bcab6a into 6c68f692c832415aafc74c39ac81609f(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:09,719 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:09,719 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/B, priority=13, startTime=1732141509661; duration=0sec 2024-11-20T22:25:09,719 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:09,719 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:B 2024-11-20T22:25:09,719 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:09,720 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:09,720 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/C is initiating minor compaction (all files) 2024-11-20T22:25:09,720 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/C in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:09,720 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/1a32703b4e6d41cdb460f5a1f04124f2, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/fbc18ab77f214742b8fba1ae0ec4a21c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/249a4a41e29042c388a20edc108ec8e9] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=36.7 K 2024-11-20T22:25:09,721 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a32703b4e6d41cdb460f5a1f04124f2, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732141508223 2024-11-20T22:25:09,721 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting fbc18ab77f214742b8fba1ae0ec4a21c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1732141508340 2024-11-20T22:25:09,721 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 249a4a41e29042c388a20edc108ec8e9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732141508493 2024-11-20T22:25:09,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 
is added to blk_1073742245_1421 (size=32039) 2024-11-20T22:25:09,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T22:25:09,730 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#C#compaction#347 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:09,731 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/ac9c06fec7e74d82952e7d3fba1caa6c is 50, key is test_row_0/C:col10/1732141508501/Put/seqid=0 2024-11-20T22:25:09,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742246_1422 (size=13085) 2024-11-20T22:25:09,737 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:09,737 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-20T22:25:09,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:09,738 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:25:09,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:25:09,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:09,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:25:09,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:09,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 2024-11-20T22:25:09,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:09,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fb02584d023a40088d68891f0517e0fb_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141509207/Put/seqid=0 2024-11-20T22:25:09,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742247_1423 (size=12454) 2024-11-20T22:25:09,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:09,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:09,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:09,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141569844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:09,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:09,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141569849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:09,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:09,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141569850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:09,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:09,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141569951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:09,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:09,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141569957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:09,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:09,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141569957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:10,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T22:25:10,129 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/129d4bfba0964afebb92e0ea22c4f225 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/129d4bfba0964afebb92e0ea22c4f225 2024-11-20T22:25:10,135 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/A of bcb32e1c322c877c5cddb0d2b8bcab6a into 129d4bfba0964afebb92e0ea22c4f225(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:10,135 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:10,135 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/A, priority=13, startTime=1732141509660; duration=0sec 2024-11-20T22:25:10,135 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:10,135 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:A 2024-11-20T22:25:10,142 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/ac9c06fec7e74d82952e7d3fba1caa6c as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/ac9c06fec7e74d82952e7d3fba1caa6c 2024-11-20T22:25:10,147 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/C of bcb32e1c322c877c5cddb0d2b8bcab6a into ac9c06fec7e74d82952e7d3fba1caa6c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:10,147 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:10,147 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/C, priority=13, startTime=1732141509661; duration=0sec 2024-11-20T22:25:10,148 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:10,148 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:C 2024-11-20T22:25:10,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:10,155 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fb02584d023a40088d68891f0517e0fb_bcb32e1c322c877c5cddb0d2b8bcab6a to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fb02584d023a40088d68891f0517e0fb_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:10,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/228fac4bddd04b3a9e0bcc2532086e3b, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:10,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/228fac4bddd04b3a9e0bcc2532086e3b is 175, key is test_row_0/A:col10/1732141509207/Put/seqid=0 2024-11-20T22:25:10,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742248_1424 (size=31255) 2024-11-20T22:25:10,161 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=353, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/228fac4bddd04b3a9e0bcc2532086e3b 2024-11-20T22:25:10,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:10,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141570156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:10,170 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:10,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141570163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:10,170 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:10,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141570164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:10,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/1c9962f9c1144b7eacfac94fbdea3d3b is 50, key is test_row_0/B:col10/1732141509207/Put/seqid=0 2024-11-20T22:25:10,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742249_1425 (size=12301) 2024-11-20T22:25:10,181 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/1c9962f9c1144b7eacfac94fbdea3d3b 2024-11-20T22:25:10,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/ec6a3094930349198f41080d146429f4 is 50, key is test_row_0/C:col10/1732141509207/Put/seqid=0 2024-11-20T22:25:10,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742250_1426 (size=12301) 2024-11-20T22:25:10,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:10,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141570464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:10,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:10,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141570471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:10,480 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:10,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141570473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:10,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T22:25:10,596 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/ec6a3094930349198f41080d146429f4 2024-11-20T22:25:10,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/228fac4bddd04b3a9e0bcc2532086e3b as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/228fac4bddd04b3a9e0bcc2532086e3b 2024-11-20T22:25:10,607 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/228fac4bddd04b3a9e0bcc2532086e3b, entries=150, sequenceid=353, filesize=30.5 K 2024-11-20T22:25:10,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/1c9962f9c1144b7eacfac94fbdea3d3b as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/1c9962f9c1144b7eacfac94fbdea3d3b 2024-11-20T22:25:10,612 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/1c9962f9c1144b7eacfac94fbdea3d3b, entries=150, sequenceid=353, filesize=12.0 K 2024-11-20T22:25:10,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/ec6a3094930349198f41080d146429f4 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/ec6a3094930349198f41080d146429f4 2024-11-20T22:25:10,616 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/ec6a3094930349198f41080d146429f4, entries=150, sequenceid=353, filesize=12.0 K 2024-11-20T22:25:10,616 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for bcb32e1c322c877c5cddb0d2b8bcab6a in 879ms, sequenceid=353, compaction requested=false 2024-11-20T22:25:10,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:10,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:25:10,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-11-20T22:25:10,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-11-20T22:25:10,618 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-20T22:25:10,618 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1890 sec 2024-11-20T22:25:10,619 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 1.2030 sec 2024-11-20T22:25:10,789 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T22:25:10,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:25:10,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:10,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:25:10,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:10,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 2024-11-20T22:25:10,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:10,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:10,801 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a80ddbfce126492fbb7f40fe21cf95a9_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141510788/Put/seqid=0 2024-11-20T22:25:10,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742251_1427 (size=14994) 2024-11-20T22:25:10,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:10,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48142 deadline: 1732141570902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:10,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:10,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1732141570916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:10,977 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:10,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141570973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:10,987 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:10,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141570982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:10,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:10,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141570983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:11,021 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48142 deadline: 1732141571017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:11,031 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1732141571025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:11,209 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:11,216 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a80ddbfce126492fbb7f40fe21cf95a9_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a80ddbfce126492fbb7f40fe21cf95a9_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:11,218 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/ad92a60d1b3b426687c90bfb897d146f, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:11,218 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/ad92a60d1b3b426687c90bfb897d146f is 175, key is test_row_0/A:col10/1732141510788/Put/seqid=0 2024-11-20T22:25:11,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48142 deadline: 1732141571222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:11,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742252_1428 (size=39949) 2024-11-20T22:25:11,236 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=368, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/ad92a60d1b3b426687c90bfb897d146f 2024-11-20T22:25:11,252 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/08ea03d4b2c146ada3b90198fcc512cd is 50, key is test_row_0/B:col10/1732141510788/Put/seqid=0 2024-11-20T22:25:11,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1732141571243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:11,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742253_1429 (size=12301) 2024-11-20T22:25:11,299 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/08ea03d4b2c146ada3b90198fcc512cd 2024-11-20T22:25:11,336 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/95e211dbea8543b5a9d739e1bd2443de is 50, key is test_row_0/C:col10/1732141510788/Put/seqid=0 2024-11-20T22:25:11,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742254_1430 (size=12301) 2024-11-20T22:25:11,388 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/95e211dbea8543b5a9d739e1bd2443de 2024-11-20T22:25:11,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/ad92a60d1b3b426687c90bfb897d146f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/ad92a60d1b3b426687c90bfb897d146f 2024-11-20T22:25:11,400 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/ad92a60d1b3b426687c90bfb897d146f, entries=200, sequenceid=368, filesize=39.0 K 2024-11-20T22:25:11,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/08ea03d4b2c146ada3b90198fcc512cd as 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/08ea03d4b2c146ada3b90198fcc512cd 2024-11-20T22:25:11,406 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/08ea03d4b2c146ada3b90198fcc512cd, entries=150, sequenceid=368, filesize=12.0 K 2024-11-20T22:25:11,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/95e211dbea8543b5a9d739e1bd2443de as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/95e211dbea8543b5a9d739e1bd2443de 2024-11-20T22:25:11,413 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/95e211dbea8543b5a9d739e1bd2443de, entries=150, sequenceid=368, filesize=12.0 K 2024-11-20T22:25:11,413 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for bcb32e1c322c877c5cddb0d2b8bcab6a in 624ms, sequenceid=368, compaction requested=true 2024-11-20T22:25:11,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:11,414 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:11,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:11,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:11,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:11,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:11,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:11,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:11,414 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:11,415 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 103243 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:11,415 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/A is initiating minor compaction (all files) 2024-11-20T22:25:11,415 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/A in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:11,415 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/129d4bfba0964afebb92e0ea22c4f225, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/228fac4bddd04b3a9e0bcc2532086e3b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/ad92a60d1b3b426687c90bfb897d146f] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=100.8 K 2024-11-20T22:25:11,415 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:11,415 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/129d4bfba0964afebb92e0ea22c4f225, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/228fac4bddd04b3a9e0bcc2532086e3b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/ad92a60d1b3b426687c90bfb897d146f] 2024-11-20T22:25:11,419 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 129d4bfba0964afebb92e0ea22c4f225, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732141508493 2024-11-20T22:25:11,419 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:11,419 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/B is initiating minor compaction (all files) 2024-11-20T22:25:11,419 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/B in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:25:11,419 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/6c68f692c832415aafc74c39ac81609f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/1c9962f9c1144b7eacfac94fbdea3d3b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/08ea03d4b2c146ada3b90198fcc512cd] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=36.8 K 2024-11-20T22:25:11,419 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 228fac4bddd04b3a9e0bcc2532086e3b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732141509194 2024-11-20T22:25:11,419 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c68f692c832415aafc74c39ac81609f, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732141508493 2024-11-20T22:25:11,419 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting ad92a60d1b3b426687c90bfb897d146f, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732141509843 2024-11-20T22:25:11,421 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c9962f9c1144b7eacfac94fbdea3d3b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732141509194 2024-11-20T22:25:11,421 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08ea03d4b2c146ada3b90198fcc512cd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732141509843 2024-11-20T22:25:11,435 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:11,437 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#B#compaction#355 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:11,437 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/0ca6c654fc5f4786b9a7f2fe2215c4a2 is 50, key is test_row_0/B:col10/1732141510788/Put/seqid=0 2024-11-20T22:25:11,445 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411203187e63638bf4ce4838e0035e8aa2c93_bcb32e1c322c877c5cddb0d2b8bcab6a store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:11,447 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411203187e63638bf4ce4838e0035e8aa2c93_bcb32e1c322c877c5cddb0d2b8bcab6a, store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:11,447 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203187e63638bf4ce4838e0035e8aa2c93_bcb32e1c322c877c5cddb0d2b8bcab6a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:11,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742255_1431 (size=13187) 2024-11-20T22:25:11,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742256_1432 (size=4469) 2024-11-20T22:25:11,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T22:25:11,527 INFO [Thread-1494 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-20T22:25:11,529 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:11,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-11-20T22:25:11,530 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:11,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T22:25:11,530 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:11,530 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:11,538 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T22:25:11,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:25:11,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:11,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:25:11,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:11,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 2024-11-20T22:25:11,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:11,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:11,572 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207c44e30dda7d4dfbab9fcbab784fb22e_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141510908/Put/seqid=0 2024-11-20T22:25:11,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742257_1433 (size=14994) 2024-11-20T22:25:11,609 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:11,613 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207c44e30dda7d4dfbab9fcbab784fb22e_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207c44e30dda7d4dfbab9fcbab784fb22e_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:11,613 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/6f8f052fcc9245849a13d56c76d24346, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:11,614 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/6f8f052fcc9245849a13d56c76d24346 is 175, key is test_row_0/A:col10/1732141510908/Put/seqid=0 2024-11-20T22:25:11,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 
2024-11-20T22:25:11,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1732141571632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:11,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742258_1434 (size=39949) 2024-11-20T22:25:11,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48142 deadline: 1732141571641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:11,659 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=392, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/6f8f052fcc9245849a13d56c76d24346 2024-11-20T22:25:11,677 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/1c76aa336093465481e2a3ec1bee64db is 50, key is test_row_0/B:col10/1732141510908/Put/seqid=0 2024-11-20T22:25:11,682 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:11,682 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T22:25:11,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:11,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:11,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:11,682 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742259_1435 (size=12301) 2024-11-20T22:25:11,715 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/1c76aa336093465481e2a3ec1bee64db 2024-11-20T22:25:11,749 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/e1b7816af35b4cddad658b12a56681dd is 50, key is test_row_0/C:col10/1732141510908/Put/seqid=0 2024-11-20T22:25:11,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1732141571743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:11,767 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48142 deadline: 1732141571757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:11,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742260_1436 (size=12301) 2024-11-20T22:25:11,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T22:25:11,837 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:11,838 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T22:25:11,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:11,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:11,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:11,838 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,855 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/0ca6c654fc5f4786b9a7f2fe2215c4a2 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/0ca6c654fc5f4786b9a7f2fe2215c4a2 2024-11-20T22:25:11,871 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#A#compaction#354 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:11,872 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/cd783ea08be345c881090f87d43d53e0 is 175, key is test_row_0/A:col10/1732141510788/Put/seqid=0 2024-11-20T22:25:11,876 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/B of bcb32e1c322c877c5cddb0d2b8bcab6a into 0ca6c654fc5f4786b9a7f2fe2215c4a2(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:11,876 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:11,876 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/B, priority=13, startTime=1732141511414; duration=0sec 2024-11-20T22:25:11,877 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:11,877 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:B 2024-11-20T22:25:11,877 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:11,880 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:11,880 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/C is initiating minor compaction (all files) 2024-11-20T22:25:11,880 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/C in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:11,880 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/ac9c06fec7e74d82952e7d3fba1caa6c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/ec6a3094930349198f41080d146429f4, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/95e211dbea8543b5a9d739e1bd2443de] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=36.8 K 2024-11-20T22:25:11,881 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac9c06fec7e74d82952e7d3fba1caa6c, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732141508493 2024-11-20T22:25:11,881 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting ec6a3094930349198f41080d146429f4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732141509194 2024-11-20T22:25:11,882 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95e211dbea8543b5a9d739e1bd2443de, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732141509843 2024-11-20T22:25:11,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34119 is added to blk_1073742261_1437 (size=32141) 2024-11-20T22:25:11,933 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#C#compaction#359 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:11,933 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/034b36929fd044e7a697cbb57c1d4d07 is 50, key is test_row_0/C:col10/1732141510788/Put/seqid=0 2024-11-20T22:25:11,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742262_1438 (size=13187) 2024-11-20T22:25:11,969 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/034b36929fd044e7a697cbb57c1d4d07 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/034b36929fd044e7a697cbb57c1d4d07 2024-11-20T22:25:11,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1732141571955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:11,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48142 deadline: 1732141571969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:11,980 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/C of bcb32e1c322c877c5cddb0d2b8bcab6a into 034b36929fd044e7a697cbb57c1d4d07(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
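The compaction records above show the ExploringCompactionPolicy picking all three eligible HFiles in store B and then store C (three ~12 K files each) and rewriting them into a single ~12.9 K file per store. As a hedged illustration only, the same kind of work can also be requested explicitly through the Admin API; the sketch below assumes a standard client connection and takes the table and family names from the log, while the server-side policy still decides which files are actually eligible.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompaction {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Queue a minor compaction for every store of the table; file
          // selection is still done by the region server's compaction policy.
          admin.compact(table);
          // Or force a full rewrite of one store (family "C" from the log).
          admin.majorCompact(table, Bytes.toBytes("C"));
        }
      }
    }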
2024-11-20T22:25:11,980 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:11,980 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/C, priority=13, startTime=1732141511414; duration=0sec 2024-11-20T22:25:11,980 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:11,980 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:C 2024-11-20T22:25:11,990 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:11,991 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T22:25:11,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:11,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:11,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:11,991 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
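The repeated pid=119 failures in this stretch are the master re-dispatching a flush request while MemStoreFlusher.0 is still writing the previous snapshot: FlushRegionCallable reports "NOT flushing ... as already flushing" and returns an IOException, and the master keeps retrying until the in-flight flush completes. On recent branches a table flush requested through the Admin API is driven by such a master procedure, which appears to be what pid=118/119 are doing here; a minimal sketch of that admin call, assuming a standard client connection, is:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlush {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; the region
          // server side of this request is the FlushRegionCallable that the
          // log shows being retried while another flush is in progress.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }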
2024-11-20T22:25:11,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:12,000 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:12,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48102 deadline: 1732141571988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:12,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:12,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48128 deadline: 1732141571995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:12,002 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:12,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48058 deadline: 1732141571997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:12,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T22:25:12,147 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:12,147 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T22:25:12,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:12,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:12,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:12,147 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
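Each "Region is too busy" WARN above is followed by a CallRunner DEBUG line for the same client connection coming back with a new callId and a later deadline, i.e. the writers simply retry their Mutate after every rejection. A minimal client-side sketch of that behavior is below; it is an assumption-laden illustration, not the test's own code: the row, family and qualifier come from the log, while the retry counts, backoff values and the lowered hbase.client.retries.number are arbitrary choices so the server's RegionTooBusyException surfaces quickly to the caller (possibly wrapped by the client's own retrying layer).

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryOnBusyRegion {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Keep the client's built-in retry loop short (assumed tuning).
        conf.setInt("hbase.client.retries.number", 1);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; attempt <= 10; attempt++) {
            try {
              table.put(put);
              break; // write accepted
            } catch (IOException e) {
              // The region is blocking updates because its memstore is over
              // the configured limit (512.0 K in this log); back off, retry.
              Thread.sleep(backoffMs);
              backoffMs = Math.min(backoffMs * 2, 5_000);
            }
          }
        }
      }
    }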
2024-11-20T22:25:12,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:12,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:12,203 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/e1b7816af35b4cddad658b12a56681dd 2024-11-20T22:25:12,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/6f8f052fcc9245849a13d56c76d24346 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/6f8f052fcc9245849a13d56c76d24346 2024-11-20T22:25:12,216 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/6f8f052fcc9245849a13d56c76d24346, entries=200, sequenceid=392, filesize=39.0 K 2024-11-20T22:25:12,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/1c76aa336093465481e2a3ec1bee64db as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/1c76aa336093465481e2a3ec1bee64db 2024-11-20T22:25:12,221 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/1c76aa336093465481e2a3ec1bee64db, entries=150, sequenceid=392, filesize=12.0 K 2024-11-20T22:25:12,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/e1b7816af35b4cddad658b12a56681dd as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/e1b7816af35b4cddad658b12a56681dd 2024-11-20T22:25:12,226 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/e1b7816af35b4cddad658b12a56681dd, entries=150, sequenceid=392, filesize=12.0 K 2024-11-20T22:25:12,227 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for bcb32e1c322c877c5cddb0d2b8bcab6a in 688ms, sequenceid=392, compaction requested=false 2024-11-20T22:25:12,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:12,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 
bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:12,282 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T22:25:12,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:25:12,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:12,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:25:12,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:12,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 2024-11-20T22:25:12,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:12,299 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:12,300 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T22:25:12,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:12,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:12,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:12,300 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
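The "Over memstore limit=512.0 K" figure in the rejections throughout this stretch is the per-region blocking threshold, which HBase derives from the memstore flush size multiplied by the block multiplier; a 512 K limit strongly suggests this test runs with a deliberately tiny flush size so that flushes and update blocking occur constantly. The sketch below only illustrates the two configuration keys involved; the concrete values are assumptions chosen to reproduce a 512 K limit, not values read from the test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches this many bytes
        // (default 128 MB; 128 KB here is an assumed test-style value).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Block new updates once the memstore passes flush.size * multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        // Prints 524288 bytes, i.e. the 512.0 K limit seen in the log.
        System.out.println("updates blocked above " + blockingLimit + " bytes");
      }
    }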
2024-11-20T22:25:12,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:12,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:12,317 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120312e2a1f6b5f4d4691d39f02286c566a_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141512280/Put/seqid=0 2024-11-20T22:25:12,327 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/cd783ea08be345c881090f87d43d53e0 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/cd783ea08be345c881090f87d43d53e0 2024-11-20T22:25:12,333 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/A of bcb32e1c322c877c5cddb0d2b8bcab6a into cd783ea08be345c881090f87d43d53e0(size=31.4 K), total size for store is 70.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:12,333 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:12,333 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/A, priority=13, startTime=1732141511414; duration=0sec 2024-11-20T22:25:12,333 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:12,333 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:A 2024-11-20T22:25:12,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742263_1439 (size=14994) 2024-11-20T22:25:12,347 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,355 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120312e2a1f6b5f4d4691d39f02286c566a_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120312e2a1f6b5f4d4691d39f02286c566a_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:12,357 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/2e6a8e47c1534a158d97e125c17b0353, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:12,358 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/2e6a8e47c1534a158d97e125c17b0353 is 175, key is test_row_0/A:col10/1732141512280/Put/seqid=0 2024-11-20T22:25:12,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742264_1440 (size=39949) 2024-11-20T22:25:12,410 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=407, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/2e6a8e47c1534a158d97e125c17b0353 2024-11-20T22:25:12,440 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/4534236c20084b389f40867f917f2dba is 50, key is test_row_0/B:col10/1732141512280/Put/seqid=0 2024-11-20T22:25:12,448 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:12,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48142 deadline: 1732141572435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:12,448 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:12,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1732141572436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:12,453 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:12,454 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T22:25:12,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
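The mobdir records above (DefaultMobStoreFlusher writing d41d8cd98f00..._bcb32e1c... and HMobStore renaming it into mobdir/data/default/TestAcidGuarantees/.../A/) indicate that family A of this table is MOB-enabled: cell values above the MOB threshold are flushed into separate MOB files, and the regular store file (2e6a8e47... here) keeps only reference cells. As a hedged sketch of how such a family can be declared when creating a table, with the 10-byte threshold an assumed value picked so that effectively every cell becomes a MOB:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
          // Family "A" stores values larger than the threshold as MOBs, so
          // its flushes go through DefaultMobStoreFlusher as seen in the log.
          table.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)
              .setMobThreshold(10L) // assumed: tiny threshold for illustration
              .build());
          admin.createTable(table.build());
        }
      }
    }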
2024-11-20T22:25:12,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:12,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:12,454 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:12,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:12,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:12,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742265_1441 (size=12301) 2024-11-20T22:25:12,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:12,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48142 deadline: 1732141572549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:12,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:12,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1732141572555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:12,568 DEBUG [Thread-1501 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x26b6d860 to 127.0.0.1:51822 2024-11-20T22:25:12,568 DEBUG [Thread-1501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:12,572 DEBUG [Thread-1497 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5910b8c7 to 127.0.0.1:51822 2024-11-20T22:25:12,572 DEBUG [Thread-1497 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:12,576 DEBUG [Thread-1495 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x42af2962 to 127.0.0.1:51822 2024-11-20T22:25:12,576 DEBUG [Thread-1495 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:12,580 DEBUG [Thread-1503 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x16722a1f to 127.0.0.1:51822 2024-11-20T22:25:12,580 DEBUG [Thread-1503 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:12,581 DEBUG [Thread-1499 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x003f9a05 to 127.0.0.1:51822 2024-11-20T22:25:12,581 DEBUG [Thread-1499 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:12,608 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:12,610 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T22:25:12,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:12,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:12,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:25:12,610 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:12,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:12,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:12,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T22:25:12,762 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:12,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:12,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48138 deadline: 1732141572762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:12,763 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T22:25:12,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:12,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48142 deadline: 1732141572761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:12,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:25:12,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:12,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:12,771 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:12,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:12,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:12,887 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/4534236c20084b389f40867f917f2dba 2024-11-20T22:25:12,908 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/176856fc7c024c2a957b346e702e2023 is 50, key is test_row_0/C:col10/1732141512280/Put/seqid=0 2024-11-20T22:25:12,927 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:12,927 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T22:25:12,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:12,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:12,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:12,931 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:12,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:12,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:12,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742266_1442 (size=12301) 2024-11-20T22:25:12,938 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/176856fc7c024c2a957b346e702e2023 2024-11-20T22:25:12,944 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/2e6a8e47c1534a158d97e125c17b0353 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/2e6a8e47c1534a158d97e125c17b0353 2024-11-20T22:25:12,954 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/2e6a8e47c1534a158d97e125c17b0353, entries=200, sequenceid=407, filesize=39.0 K 2024-11-20T22:25:12,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/4534236c20084b389f40867f917f2dba as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/4534236c20084b389f40867f917f2dba 2024-11-20T22:25:12,963 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/4534236c20084b389f40867f917f2dba, entries=150, sequenceid=407, filesize=12.0 K 2024-11-20T22:25:12,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/176856fc7c024c2a957b346e702e2023 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/176856fc7c024c2a957b346e702e2023 2024-11-20T22:25:12,981 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/176856fc7c024c2a957b346e702e2023, entries=150, sequenceid=407, filesize=12.0 K 2024-11-20T22:25:12,982 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for bcb32e1c322c877c5cddb0d2b8bcab6a in 701ms, sequenceid=407, compaction requested=true 2024-11-20T22:25:12,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:12,982 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 
blocking 2024-11-20T22:25:12,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:12,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:12,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:12,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:12,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcb32e1c322c877c5cddb0d2b8bcab6a:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:12,983 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:12,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:12,984 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 112039 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:12,984 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/A is initiating minor compaction (all files) 2024-11-20T22:25:12,984 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/A in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:12,984 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/cd783ea08be345c881090f87d43d53e0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/6f8f052fcc9245849a13d56c76d24346, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/2e6a8e47c1534a158d97e125c17b0353] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=109.4 K 2024-11-20T22:25:12,984 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:25:12,984 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/cd783ea08be345c881090f87d43d53e0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/6f8f052fcc9245849a13d56c76d24346, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/2e6a8e47c1534a158d97e125c17b0353] 2024-11-20T22:25:12,984 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:12,984 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd783ea08be345c881090f87d43d53e0, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732141509843 2024-11-20T22:25:12,984 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/B is initiating minor compaction (all files) 2024-11-20T22:25:12,985 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/B in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:12,985 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/0ca6c654fc5f4786b9a7f2fe2215c4a2, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/1c76aa336093465481e2a3ec1bee64db, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/4534236c20084b389f40867f917f2dba] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=36.9 K 2024-11-20T22:25:12,985 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f8f052fcc9245849a13d56c76d24346, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1732141510877 2024-11-20T22:25:12,985 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ca6c654fc5f4786b9a7f2fe2215c4a2, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732141509843 2024-11-20T22:25:12,985 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2e6a8e47c1534a158d97e125c17b0353, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1732141511591 2024-11-20T22:25:12,986 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c76aa336093465481e2a3ec1bee64db, 
keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1732141510877 2024-11-20T22:25:12,986 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 4534236c20084b389f40867f917f2dba, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1732141511591 2024-11-20T22:25:12,992 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:13,009 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#B#compaction#364 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:13,009 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/bef7961b4b92442e9d24c9e3e9893c2e is 50, key is test_row_0/B:col10/1732141512280/Put/seqid=0 2024-11-20T22:25:13,012 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112094ac5c1ba5104424aec75bcd060366ef_bcb32e1c322c877c5cddb0d2b8bcab6a store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:13,016 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112094ac5c1ba5104424aec75bcd060366ef_bcb32e1c322c877c5cddb0d2b8bcab6a, store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:13,016 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112094ac5c1ba5104424aec75bcd060366ef_bcb32e1c322c877c5cddb0d2b8bcab6a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:13,083 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:13,084 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T22:25:13,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:25:13,084 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T22:25:13,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742267_1443 (size=13289) 2024-11-20T22:25:13,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742268_1444 (size=4469) 2024-11-20T22:25:13,133 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#A#compaction#363 average throughput is 0.17 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:13,134 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/fef0ef5a7c3947569e8f5c923b7504a6 is 175, key is test_row_0/A:col10/1732141512280/Put/seqid=0 2024-11-20T22:25:13,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. as already flushing 2024-11-20T22:25:13,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:13,155 DEBUG [Thread-1484 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x033feebb to 127.0.0.1:51822 2024-11-20T22:25:13,155 DEBUG [Thread-1484 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:13,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:25:13,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:13,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:25:13,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:13,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 2024-11-20T22:25:13,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:13,157 DEBUG [Thread-1490 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3401188a to 127.0.0.1:51822 2024-11-20T22:25:13,157 DEBUG [Thread-1490 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:13,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added 
to blk_1073742269_1445 (size=32243) 2024-11-20T22:25:13,178 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/fef0ef5a7c3947569e8f5c923b7504a6 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/fef0ef5a7c3947569e8f5c923b7504a6 2024-11-20T22:25:13,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cadb62c082ca4f12b6b806765318c58f_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141513071/Put/seqid=0 2024-11-20T22:25:13,194 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/A of bcb32e1c322c877c5cddb0d2b8bcab6a into fef0ef5a7c3947569e8f5c923b7504a6(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:13,194 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:13,194 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/A, priority=13, startTime=1732141512982; duration=0sec 2024-11-20T22:25:13,194 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:13,194 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:A 2024-11-20T22:25:13,194 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:13,196 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:13,196 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): bcb32e1c322c877c5cddb0d2b8bcab6a/C is initiating minor compaction (all files) 2024-11-20T22:25:13,196 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bcb32e1c322c877c5cddb0d2b8bcab6a/C in TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 
2024-11-20T22:25:13,196 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/034b36929fd044e7a697cbb57c1d4d07, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/e1b7816af35b4cddad658b12a56681dd, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/176856fc7c024c2a957b346e702e2023] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp, totalSize=36.9 K 2024-11-20T22:25:13,196 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 034b36929fd044e7a697cbb57c1d4d07, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732141509843 2024-11-20T22:25:13,197 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting e1b7816af35b4cddad658b12a56681dd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1732141510877 2024-11-20T22:25:13,197 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 176856fc7c024c2a957b346e702e2023, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1732141511591 2024-11-20T22:25:13,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742270_1446 (size=12454) 2024-11-20T22:25:13,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,224 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcb32e1c322c877c5cddb0d2b8bcab6a#C#compaction#366 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:13,224 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cadb62c082ca4f12b6b806765318c58f_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cadb62c082ca4f12b6b806765318c58f_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:13,224 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/999cc41a2072466789fd7a5f7b1c85cf is 50, key is test_row_0/C:col10/1732141512280/Put/seqid=0 2024-11-20T22:25:13,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/c0d0703a3a75445e805ece8c89fcae16, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:13,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/c0d0703a3a75445e805ece8c89fcae16 is 175, key is test_row_0/A:col10/1732141513071/Put/seqid=0 2024-11-20T22:25:13,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742271_1447 (size=13289) 2024-11-20T22:25:13,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742272_1448 (size=31255) 2024-11-20T22:25:13,493 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/bef7961b4b92442e9d24c9e3e9893c2e as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/bef7961b4b92442e9d24c9e3e9893c2e 2024-11-20T22:25:13,498 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/B of bcb32e1c322c877c5cddb0d2b8bcab6a into bef7961b4b92442e9d24c9e3e9893c2e(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:13,498 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:13,498 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/B, priority=13, startTime=1732141512982; duration=0sec 2024-11-20T22:25:13,498 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:13,498 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:B 2024-11-20T22:25:13,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T22:25:13,688 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/999cc41a2072466789fd7a5f7b1c85cf as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/999cc41a2072466789fd7a5f7b1c85cf 2024-11-20T22:25:13,688 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=434, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/c0d0703a3a75445e805ece8c89fcae16 2024-11-20T22:25:13,699 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bcb32e1c322c877c5cddb0d2b8bcab6a/C of bcb32e1c322c877c5cddb0d2b8bcab6a into 999cc41a2072466789fd7a5f7b1c85cf(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:13,700 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:13,700 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a., storeName=bcb32e1c322c877c5cddb0d2b8bcab6a/C, priority=13, startTime=1732141512983; duration=0sec 2024-11-20T22:25:13,700 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:13,700 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcb32e1c322c877c5cddb0d2b8bcab6a:C 2024-11-20T22:25:13,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/a1cc7260b3d04152b79400f15d387356 is 50, key is test_row_0/B:col10/1732141513071/Put/seqid=0 2024-11-20T22:25:13,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742273_1449 (size=12301) 2024-11-20T22:25:14,018 DEBUG [Thread-1488 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7a86cb71 to 127.0.0.1:51822 2024-11-20T22:25:14,018 DEBUG [Thread-1488 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:14,019 DEBUG [Thread-1492 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x55650656 to 127.0.0.1:51822 2024-11-20T22:25:14,019 DEBUG [Thread-1492 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:14,022 DEBUG [Thread-1486 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c40db2e to 127.0.0.1:51822 2024-11-20T22:25:14,022 DEBUG [Thread-1486 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:14,140 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/a1cc7260b3d04152b79400f15d387356 2024-11-20T22:25:14,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/a7adfb25e9cb41d093e60f3d86570d89 is 50, key is test_row_0/C:col10/1732141513071/Put/seqid=0 2024-11-20T22:25:14,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742274_1450 (size=12301) 2024-11-20T22:25:14,626 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=434 (bloomFilter=true), 
to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/a7adfb25e9cb41d093e60f3d86570d89 2024-11-20T22:25:14,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/c0d0703a3a75445e805ece8c89fcae16 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/c0d0703a3a75445e805ece8c89fcae16 2024-11-20T22:25:14,635 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/c0d0703a3a75445e805ece8c89fcae16, entries=150, sequenceid=434, filesize=30.5 K 2024-11-20T22:25:14,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/a1cc7260b3d04152b79400f15d387356 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/a1cc7260b3d04152b79400f15d387356 2024-11-20T22:25:14,640 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/a1cc7260b3d04152b79400f15d387356, entries=150, sequenceid=434, filesize=12.0 K 2024-11-20T22:25:14,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/a7adfb25e9cb41d093e60f3d86570d89 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/a7adfb25e9cb41d093e60f3d86570d89 2024-11-20T22:25:14,645 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/a7adfb25e9cb41d093e60f3d86570d89, entries=150, sequenceid=434, filesize=12.0 K 2024-11-20T22:25:14,646 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=20.13 KB/20610 for bcb32e1c322c877c5cddb0d2b8bcab6a in 1561ms, sequenceid=434, compaction requested=false 2024-11-20T22:25:14,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 
bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:14,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:14,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-11-20T22:25:14,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-11-20T22:25:14,649 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-20T22:25:14,649 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1170 sec 2024-11-20T22:25:14,651 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 3.1210 sec 2024-11-20T22:25:15,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T22:25:15,643 INFO [Thread-1494 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-11-20T22:25:15,644 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-20T22:25:15,644 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 34 2024-11-20T22:25:15,644 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 92 2024-11-20T22:25:15,644 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 86 2024-11-20T22:25:15,644 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 21 2024-11-20T22:25:15,644 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 110 2024-11-20T22:25:15,644 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T22:25:15,644 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T22:25:15,644 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1159 2024-11-20T22:25:15,644 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3477 rows 2024-11-20T22:25:15,644 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1172 2024-11-20T22:25:15,644 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3516 rows 2024-11-20T22:25:15,644 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1173 2024-11-20T22:25:15,644 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3519 rows 2024-11-20T22:25:15,644 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1168 2024-11-20T22:25:15,644 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3504 rows 2024-11-20T22:25:15,644 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1157 2024-11-20T22:25:15,644 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3471 rows 2024-11-20T22:25:15,644 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T22:25:15,644 DEBUG [Time-limited test {}] 
zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4dacfd49 to 127.0.0.1:51822 2024-11-20T22:25:15,644 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:15,647 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T22:25:15,647 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T22:25:15,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:15,650 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141515649"}]},"ts":"1732141515649"} 2024-11-20T22:25:15,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T22:25:15,652 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T22:25:15,658 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T22:25:15,660 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T22:25:15,662 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bcb32e1c322c877c5cddb0d2b8bcab6a, UNASSIGN}] 2024-11-20T22:25:15,664 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bcb32e1c322c877c5cddb0d2b8bcab6a, UNASSIGN 2024-11-20T22:25:15,664 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=bcb32e1c322c877c5cddb0d2b8bcab6a, regionState=CLOSING, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:15,665 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T22:25:15,665 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; CloseRegionProcedure bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950}] 2024-11-20T22:25:15,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T22:25:15,817 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:15,817 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] handler.UnassignRegionHandler(124): Close bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:15,817 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T22:25:15,817 DEBUG 
[RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1681): Closing bcb32e1c322c877c5cddb0d2b8bcab6a, disabling compactions & flushes 2024-11-20T22:25:15,817 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:15,817 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:15,817 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. after waiting 0 ms 2024-11-20T22:25:15,817 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:15,818 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(2837): Flushing bcb32e1c322c877c5cddb0d2b8bcab6a 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-20T22:25:15,818 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=A 2024-11-20T22:25:15,818 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:15,818 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=B 2024-11-20T22:25:15,818 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:15,818 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bcb32e1c322c877c5cddb0d2b8bcab6a, store=C 2024-11-20T22:25:15,818 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:15,848 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120aa5ba3d8869e486eb418d09d3a77e611_bcb32e1c322c877c5cddb0d2b8bcab6a is 50, key is test_row_0/A:col10/1732141514021/Put/seqid=0 2024-11-20T22:25:15,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742275_1451 (size=9914) 2024-11-20T22:25:15,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T22:25:16,260 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T22:25:16,311 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:16,319 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120aa5ba3d8869e486eb418d09d3a77e611_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120aa5ba3d8869e486eb418d09d3a77e611_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:16,323 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/6dada790e7e9456891da0cc25e2c03d2, store: [table=TestAcidGuarantees family=A region=bcb32e1c322c877c5cddb0d2b8bcab6a] 2024-11-20T22:25:16,324 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/6dada790e7e9456891da0cc25e2c03d2 is 175, key is test_row_0/A:col10/1732141514021/Put/seqid=0 2024-11-20T22:25:16,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742276_1452 (size=22561) 2024-11-20T22:25:16,348 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=443, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/6dada790e7e9456891da0cc25e2c03d2 2024-11-20T22:25:16,375 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/1762f7bf995648e6a2ba6ce2ae9fab7f is 50, key is test_row_0/B:col10/1732141514021/Put/seqid=0 2024-11-20T22:25:16,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742277_1453 (size=9857) 2024-11-20T22:25:16,451 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=443 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/1762f7bf995648e6a2ba6ce2ae9fab7f 
2024-11-20T22:25:16,503 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/b49bd3fd4a924214a708d764523f4bff is 50, key is test_row_0/C:col10/1732141514021/Put/seqid=0 2024-11-20T22:25:16,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742278_1454 (size=9857) 2024-11-20T22:25:16,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T22:25:16,942 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=443 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/b49bd3fd4a924214a708d764523f4bff 2024-11-20T22:25:16,983 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/A/6dada790e7e9456891da0cc25e2c03d2 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/6dada790e7e9456891da0cc25e2c03d2 2024-11-20T22:25:16,992 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/6dada790e7e9456891da0cc25e2c03d2, entries=100, sequenceid=443, filesize=22.0 K 2024-11-20T22:25:16,999 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/B/1762f7bf995648e6a2ba6ce2ae9fab7f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/1762f7bf995648e6a2ba6ce2ae9fab7f 2024-11-20T22:25:17,030 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/1762f7bf995648e6a2ba6ce2ae9fab7f, entries=100, sequenceid=443, filesize=9.6 K 2024-11-20T22:25:17,030 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/.tmp/C/b49bd3fd4a924214a708d764523f4bff as 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/b49bd3fd4a924214a708d764523f4bff 2024-11-20T22:25:17,036 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/b49bd3fd4a924214a708d764523f4bff, entries=100, sequenceid=443, filesize=9.6 K 2024-11-20T22:25:17,044 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for bcb32e1c322c877c5cddb0d2b8bcab6a in 1225ms, sequenceid=443, compaction requested=true 2024-11-20T22:25:17,044 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/7e78901d475845ffa5a108b6e418c468, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/de43a769275e48c29cd4e748beb61ff0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/128fb923a88c4e0cb5c790a0b425ac14, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/e18defaa0ee74b5c8c3bbbe65cab78d1, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/add0aafb054f4bfe99738ae6d82728bc, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/416c6ccb474a4e96a656dea46785dff5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/87f031d7ef60413780c215473e743e0e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/f18244a860934112aba54c2026defde5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/2f1ed40deefd4a56890bbe93aedc38e9, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/d8d12766592e4bc085b8c80f5c0f280c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/8cf62680bb864d11866610c66e05a173, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/226f3eaebde345a1b0e4dcfb6b41e69c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/8f56c74831de4b6682b4862ba21728bb, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/36a51329e1184c32800e997a93d25135, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/968be4df223d4e0188c225f6b61adcbe, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/05fb18cfffbb476ba02f9e95b1d02fd2, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/831b0c5b9148478a94ad00b197358f38, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/fb59bf2ba9b549a18968689868fa48a3, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/fca5d3d3d2954cb88629fa0fd296e519, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/40cb7493acb24539b837cc3df02db019, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/fa58ba0a6bb84894aeb90c94bdb525a2, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/e8a93f3ba57b461483daa80e56e7508a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/15f49442c5c94636aed2d2d26543f811, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/129d4bfba0964afebb92e0ea22c4f225, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/228fac4bddd04b3a9e0bcc2532086e3b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/ad92a60d1b3b426687c90bfb897d146f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/cd783ea08be345c881090f87d43d53e0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/6f8f052fcc9245849a13d56c76d24346, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/2e6a8e47c1534a158d97e125c17b0353] to archive 2024-11-20T22:25:17,047 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T22:25:17,056 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/7e78901d475845ffa5a108b6e418c468 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/7e78901d475845ffa5a108b6e418c468 2024-11-20T22:25:17,057 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/de43a769275e48c29cd4e748beb61ff0 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/de43a769275e48c29cd4e748beb61ff0 2024-11-20T22:25:17,058 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/128fb923a88c4e0cb5c790a0b425ac14 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/128fb923a88c4e0cb5c790a0b425ac14 2024-11-20T22:25:17,060 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/e18defaa0ee74b5c8c3bbbe65cab78d1 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/e18defaa0ee74b5c8c3bbbe65cab78d1 2024-11-20T22:25:17,061 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/add0aafb054f4bfe99738ae6d82728bc to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/add0aafb054f4bfe99738ae6d82728bc 2024-11-20T22:25:17,062 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/416c6ccb474a4e96a656dea46785dff5 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/416c6ccb474a4e96a656dea46785dff5 2024-11-20T22:25:17,069 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/87f031d7ef60413780c215473e743e0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/87f031d7ef60413780c215473e743e0e 2024-11-20T22:25:17,074 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/f18244a860934112aba54c2026defde5 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/f18244a860934112aba54c2026defde5 2024-11-20T22:25:17,088 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/2f1ed40deefd4a56890bbe93aedc38e9 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/2f1ed40deefd4a56890bbe93aedc38e9 2024-11-20T22:25:17,115 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/d8d12766592e4bc085b8c80f5c0f280c to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/d8d12766592e4bc085b8c80f5c0f280c 2024-11-20T22:25:17,127 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/8cf62680bb864d11866610c66e05a173 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/8cf62680bb864d11866610c66e05a173 2024-11-20T22:25:17,139 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/226f3eaebde345a1b0e4dcfb6b41e69c to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/226f3eaebde345a1b0e4dcfb6b41e69c 2024-11-20T22:25:17,150 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/8f56c74831de4b6682b4862ba21728bb to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/8f56c74831de4b6682b4862ba21728bb 2024-11-20T22:25:17,171 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/36a51329e1184c32800e997a93d25135 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/36a51329e1184c32800e997a93d25135 2024-11-20T22:25:17,177 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/968be4df223d4e0188c225f6b61adcbe to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/968be4df223d4e0188c225f6b61adcbe 2024-11-20T22:25:17,179 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/05fb18cfffbb476ba02f9e95b1d02fd2 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/05fb18cfffbb476ba02f9e95b1d02fd2 2024-11-20T22:25:17,180 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/831b0c5b9148478a94ad00b197358f38 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/831b0c5b9148478a94ad00b197358f38 2024-11-20T22:25:17,188 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/fb59bf2ba9b549a18968689868fa48a3 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/fb59bf2ba9b549a18968689868fa48a3 2024-11-20T22:25:17,189 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/fca5d3d3d2954cb88629fa0fd296e519 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/fca5d3d3d2954cb88629fa0fd296e519 2024-11-20T22:25:17,190 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/40cb7493acb24539b837cc3df02db019 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/40cb7493acb24539b837cc3df02db019 2024-11-20T22:25:17,190 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/fa58ba0a6bb84894aeb90c94bdb525a2 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/fa58ba0a6bb84894aeb90c94bdb525a2 2024-11-20T22:25:17,191 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/e8a93f3ba57b461483daa80e56e7508a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/e8a93f3ba57b461483daa80e56e7508a 2024-11-20T22:25:17,192 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/15f49442c5c94636aed2d2d26543f811 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/15f49442c5c94636aed2d2d26543f811 2024-11-20T22:25:17,193 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/129d4bfba0964afebb92e0ea22c4f225 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/129d4bfba0964afebb92e0ea22c4f225 2024-11-20T22:25:17,194 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/228fac4bddd04b3a9e0bcc2532086e3b to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/228fac4bddd04b3a9e0bcc2532086e3b 2024-11-20T22:25:17,198 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/ad92a60d1b3b426687c90bfb897d146f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/ad92a60d1b3b426687c90bfb897d146f 2024-11-20T22:25:17,209 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/cd783ea08be345c881090f87d43d53e0 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/cd783ea08be345c881090f87d43d53e0 2024-11-20T22:25:17,227 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/6f8f052fcc9245849a13d56c76d24346 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/6f8f052fcc9245849a13d56c76d24346 2024-11-20T22:25:17,230 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/2e6a8e47c1534a158d97e125c17b0353 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/2e6a8e47c1534a158d97e125c17b0353 2024-11-20T22:25:17,234 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/768bae251c944c008a10e53c40b164c5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/70a0e5af319e40c09ae042e7eef1c106, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/42a50ab1cba242e0ac0b42927f58c279, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2f30e1fd8b194e128ca27d373a69c832, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/a8972f4169e54892861cd4219829d07a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/3a125b3cc4d3442fbe66e9e0b15d75db, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/7d381056fdc04479923bb0658e08d132, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/710c1b8a557d4432be0484e47bb26015, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/8b274ac00a9c4e1ea959a176abe5a752, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/e21c58822f9643888c4656b2c4b75240, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2287a0e965e14de7bcfdd46b13e2c467, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/6b6344107640488a81c9de6953085978, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/ed8f2ae29c5d400fb7b337f3a5e21913, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/c2166be2703d40a49bd734e94deaf990, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2667fe0ca6344b9bb364879e2d4d07a0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/031260e43d4d4592b4188e9750d4181f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/df196f56af5d44888ceebee93e2584c0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/8a8e0caa5ea14ae1b8fcbc0c40e254f0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/bc5c59d280564f20b9d006643ac06756, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/493e554f511a41dc9331c017a5e1def4, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/861b7aab5657453bb4e307909b49f677, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2ef25070f5e64f108617fa984c6c84a8, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/6c68f692c832415aafc74c39ac81609f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/4e86339b12b149e1b100454b7e2a9096, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/1c9962f9c1144b7eacfac94fbdea3d3b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/0ca6c654fc5f4786b9a7f2fe2215c4a2, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/08ea03d4b2c146ada3b90198fcc512cd, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/1c76aa336093465481e2a3ec1bee64db, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/4534236c20084b389f40867f917f2dba] to archive 2024-11-20T22:25:17,235 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T22:25:17,237 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/768bae251c944c008a10e53c40b164c5 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/768bae251c944c008a10e53c40b164c5 2024-11-20T22:25:17,240 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/70a0e5af319e40c09ae042e7eef1c106 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/70a0e5af319e40c09ae042e7eef1c106 2024-11-20T22:25:17,241 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/42a50ab1cba242e0ac0b42927f58c279 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/42a50ab1cba242e0ac0b42927f58c279 2024-11-20T22:25:17,244 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2f30e1fd8b194e128ca27d373a69c832 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2f30e1fd8b194e128ca27d373a69c832 2024-11-20T22:25:17,244 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/a8972f4169e54892861cd4219829d07a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/a8972f4169e54892861cd4219829d07a 2024-11-20T22:25:17,245 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/3a125b3cc4d3442fbe66e9e0b15d75db to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/3a125b3cc4d3442fbe66e9e0b15d75db 2024-11-20T22:25:17,246 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/7d381056fdc04479923bb0658e08d132 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/7d381056fdc04479923bb0658e08d132 2024-11-20T22:25:17,247 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/710c1b8a557d4432be0484e47bb26015 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/710c1b8a557d4432be0484e47bb26015 2024-11-20T22:25:17,253 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/8b274ac00a9c4e1ea959a176abe5a752 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/8b274ac00a9c4e1ea959a176abe5a752 2024-11-20T22:25:17,255 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/e21c58822f9643888c4656b2c4b75240 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/e21c58822f9643888c4656b2c4b75240 2024-11-20T22:25:17,256 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2287a0e965e14de7bcfdd46b13e2c467 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2287a0e965e14de7bcfdd46b13e2c467 2024-11-20T22:25:17,261 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/6b6344107640488a81c9de6953085978 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/6b6344107640488a81c9de6953085978 2024-11-20T22:25:17,262 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/ed8f2ae29c5d400fb7b337f3a5e21913 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/ed8f2ae29c5d400fb7b337f3a5e21913 2024-11-20T22:25:17,263 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/c2166be2703d40a49bd734e94deaf990 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/c2166be2703d40a49bd734e94deaf990 2024-11-20T22:25:17,264 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2667fe0ca6344b9bb364879e2d4d07a0 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2667fe0ca6344b9bb364879e2d4d07a0 2024-11-20T22:25:17,266 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/031260e43d4d4592b4188e9750d4181f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/031260e43d4d4592b4188e9750d4181f 2024-11-20T22:25:17,279 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/df196f56af5d44888ceebee93e2584c0 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/df196f56af5d44888ceebee93e2584c0 2024-11-20T22:25:17,284 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/8a8e0caa5ea14ae1b8fcbc0c40e254f0 to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/8a8e0caa5ea14ae1b8fcbc0c40e254f0 2024-11-20T22:25:17,285 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/bc5c59d280564f20b9d006643ac06756 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/bc5c59d280564f20b9d006643ac06756 2024-11-20T22:25:17,288 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/493e554f511a41dc9331c017a5e1def4 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/493e554f511a41dc9331c017a5e1def4 2024-11-20T22:25:17,297 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/861b7aab5657453bb4e307909b49f677 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/861b7aab5657453bb4e307909b49f677 2024-11-20T22:25:17,299 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2ef25070f5e64f108617fa984c6c84a8 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/2ef25070f5e64f108617fa984c6c84a8 2024-11-20T22:25:17,300 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/6c68f692c832415aafc74c39ac81609f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/6c68f692c832415aafc74c39ac81609f 2024-11-20T22:25:17,302 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/4e86339b12b149e1b100454b7e2a9096 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/4e86339b12b149e1b100454b7e2a9096 2024-11-20T22:25:17,303 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/1c9962f9c1144b7eacfac94fbdea3d3b to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/1c9962f9c1144b7eacfac94fbdea3d3b 2024-11-20T22:25:17,305 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/0ca6c654fc5f4786b9a7f2fe2215c4a2 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/0ca6c654fc5f4786b9a7f2fe2215c4a2 2024-11-20T22:25:17,306 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/08ea03d4b2c146ada3b90198fcc512cd to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/08ea03d4b2c146ada3b90198fcc512cd 2024-11-20T22:25:17,312 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/1c76aa336093465481e2a3ec1bee64db to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/1c76aa336093465481e2a3ec1bee64db 2024-11-20T22:25:17,314 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/4534236c20084b389f40867f917f2dba to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/4534236c20084b389f40867f917f2dba 2024-11-20T22:25:17,339 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/59d9d31b0aa2408b8d5c9466d7eac3ed, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/99a24d57bd3e43509e27eb5590efd3ec, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/df1cced665dc4466b57e96294d2a1f33, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/17970aae36eb4b88bfbe25a679e6adcc, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/65ddd4c16078447c92dba4125f0b21b8, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/7ef22879dd674b81a67bdb5a3522e599, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/3159c3cb04b341cab432fd6222093349, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/f0d1879b27eb47eb9ab76741d9391275, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/c23cfe3ba837424f905a72ddfb75f5ce, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/dbaca0becd2e4a098b6ed6bb3cf017ce, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/4d387d35ec8d4acaaf827a0eb097566a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/64a0e14971384c06a367f8354026516d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/1dccf5d3cb264456a524b55988a4b765, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/388c82c086b24d618c2604a0a1e40a6b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/5ecb6b8309c9420e9716c157af900573, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/ed71e2f16e9e49c59bc2398a0feb313d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/587d88b097024d5c8cb7962a4adadab5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/4477c274c1544af197e805857caec6b0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/6bcc6ec955434ac385a2aaf6b124ebda, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/1a32703b4e6d41cdb460f5a1f04124f2, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/24dcffa52bd74b90808454e9d0c7ab85, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/fbc18ab77f214742b8fba1ae0ec4a21c, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/ac9c06fec7e74d82952e7d3fba1caa6c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/249a4a41e29042c388a20edc108ec8e9, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/ec6a3094930349198f41080d146429f4, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/034b36929fd044e7a697cbb57c1d4d07, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/95e211dbea8543b5a9d739e1bd2443de, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/e1b7816af35b4cddad658b12a56681dd, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/176856fc7c024c2a957b346e702e2023] to archive 2024-11-20T22:25:17,342 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T22:25:17,345 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/59d9d31b0aa2408b8d5c9466d7eac3ed to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/59d9d31b0aa2408b8d5c9466d7eac3ed 2024-11-20T22:25:17,362 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/99a24d57bd3e43509e27eb5590efd3ec to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/99a24d57bd3e43509e27eb5590efd3ec 2024-11-20T22:25:17,369 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/df1cced665dc4466b57e96294d2a1f33 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/df1cced665dc4466b57e96294d2a1f33 2024-11-20T22:25:17,372 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/17970aae36eb4b88bfbe25a679e6adcc to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/17970aae36eb4b88bfbe25a679e6adcc 2024-11-20T22:25:17,374 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/65ddd4c16078447c92dba4125f0b21b8 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/65ddd4c16078447c92dba4125f0b21b8 2024-11-20T22:25:17,375 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/7ef22879dd674b81a67bdb5a3522e599 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/7ef22879dd674b81a67bdb5a3522e599 2024-11-20T22:25:17,376 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/3159c3cb04b341cab432fd6222093349 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/3159c3cb04b341cab432fd6222093349 2024-11-20T22:25:17,377 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/f0d1879b27eb47eb9ab76741d9391275 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/f0d1879b27eb47eb9ab76741d9391275 2024-11-20T22:25:17,378 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/c23cfe3ba837424f905a72ddfb75f5ce to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/c23cfe3ba837424f905a72ddfb75f5ce 2024-11-20T22:25:17,380 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/dbaca0becd2e4a098b6ed6bb3cf017ce to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/dbaca0becd2e4a098b6ed6bb3cf017ce 2024-11-20T22:25:17,381 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/4d387d35ec8d4acaaf827a0eb097566a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/4d387d35ec8d4acaaf827a0eb097566a 2024-11-20T22:25:17,382 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/64a0e14971384c06a367f8354026516d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/64a0e14971384c06a367f8354026516d 2024-11-20T22:25:17,396 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/1dccf5d3cb264456a524b55988a4b765 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/1dccf5d3cb264456a524b55988a4b765 2024-11-20T22:25:17,397 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/388c82c086b24d618c2604a0a1e40a6b to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/388c82c086b24d618c2604a0a1e40a6b 2024-11-20T22:25:17,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/5ecb6b8309c9420e9716c157af900573 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/5ecb6b8309c9420e9716c157af900573 2024-11-20T22:25:17,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/ed71e2f16e9e49c59bc2398a0feb313d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/ed71e2f16e9e49c59bc2398a0feb313d 2024-11-20T22:25:17,400 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/587d88b097024d5c8cb7962a4adadab5 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/587d88b097024d5c8cb7962a4adadab5 2024-11-20T22:25:17,401 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/4477c274c1544af197e805857caec6b0 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/4477c274c1544af197e805857caec6b0 2024-11-20T22:25:17,402 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/6bcc6ec955434ac385a2aaf6b124ebda to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/6bcc6ec955434ac385a2aaf6b124ebda 2024-11-20T22:25:17,402 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/1a32703b4e6d41cdb460f5a1f04124f2 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/1a32703b4e6d41cdb460f5a1f04124f2 2024-11-20T22:25:17,403 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/24dcffa52bd74b90808454e9d0c7ab85 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/24dcffa52bd74b90808454e9d0c7ab85 2024-11-20T22:25:17,404 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/fbc18ab77f214742b8fba1ae0ec4a21c to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/fbc18ab77f214742b8fba1ae0ec4a21c 2024-11-20T22:25:17,411 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/ac9c06fec7e74d82952e7d3fba1caa6c to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/ac9c06fec7e74d82952e7d3fba1caa6c 2024-11-20T22:25:17,421 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/249a4a41e29042c388a20edc108ec8e9 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/249a4a41e29042c388a20edc108ec8e9 2024-11-20T22:25:17,427 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/ec6a3094930349198f41080d146429f4 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/ec6a3094930349198f41080d146429f4 2024-11-20T22:25:17,431 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/034b36929fd044e7a697cbb57c1d4d07 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/034b36929fd044e7a697cbb57c1d4d07 2024-11-20T22:25:17,433 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/95e211dbea8543b5a9d739e1bd2443de to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/95e211dbea8543b5a9d739e1bd2443de 2024-11-20T22:25:17,434 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/e1b7816af35b4cddad658b12a56681dd to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/e1b7816af35b4cddad658b12a56681dd 2024-11-20T22:25:17,435 DEBUG [StoreCloser-TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/176856fc7c024c2a957b346e702e2023 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/176856fc7c024c2a957b346e702e2023 2024-11-20T22:25:17,442 DEBUG 
[RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/recovered.edits/446.seqid, newMaxSeqId=446, maxSeqId=4 2024-11-20T22:25:17,442 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a. 2024-11-20T22:25:17,442 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1635): Region close journal for bcb32e1c322c877c5cddb0d2b8bcab6a: 2024-11-20T22:25:17,444 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] handler.UnassignRegionHandler(170): Closed bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,444 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=bcb32e1c322c877c5cddb0d2b8bcab6a, regionState=CLOSED 2024-11-20T22:25:17,446 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-11-20T22:25:17,446 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; CloseRegionProcedure bcb32e1c322c877c5cddb0d2b8bcab6a, server=6365a1e51efd,44631,1732141399950 in 1.7800 sec 2024-11-20T22:25:17,447 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=122, resume processing ppid=121 2024-11-20T22:25:17,447 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, ppid=121, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=bcb32e1c322c877c5cddb0d2b8bcab6a, UNASSIGN in 1.7840 sec 2024-11-20T22:25:17,448 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-11-20T22:25:17,449 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.7880 sec 2024-11-20T22:25:17,449 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141517449"}]},"ts":"1732141517449"} 2024-11-20T22:25:17,450 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T22:25:17,483 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T22:25:17,486 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8370 sec 2024-11-20T22:25:17,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T22:25:17,771 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-11-20T22:25:17,772 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T22:25:17,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, 
state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:17,795 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=124, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:17,796 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=124, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:17,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T22:25:17,830 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,855 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A, FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B, FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C, FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/recovered.edits] 2024-11-20T22:25:17,883 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/6dada790e7e9456891da0cc25e2c03d2 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/6dada790e7e9456891da0cc25e2c03d2 2024-11-20T22:25:17,898 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/c0d0703a3a75445e805ece8c89fcae16 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/c0d0703a3a75445e805ece8c89fcae16 2024-11-20T22:25:17,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T22:25:17,911 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/fef0ef5a7c3947569e8f5c923b7504a6 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/A/fef0ef5a7c3947569e8f5c923b7504a6 2024-11-20T22:25:17,913 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/1762f7bf995648e6a2ba6ce2ae9fab7f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/1762f7bf995648e6a2ba6ce2ae9fab7f 2024-11-20T22:25:17,913 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/a1cc7260b3d04152b79400f15d387356 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/a1cc7260b3d04152b79400f15d387356 2024-11-20T22:25:17,914 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/bef7961b4b92442e9d24c9e3e9893c2e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/bef7961b4b92442e9d24c9e3e9893c2e 2024-11-20T22:25:17,916 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/999cc41a2072466789fd7a5f7b1c85cf to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/999cc41a2072466789fd7a5f7b1c85cf 2024-11-20T22:25:17,917 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/a7adfb25e9cb41d093e60f3d86570d89 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/a7adfb25e9cb41d093e60f3d86570d89 2024-11-20T22:25:17,918 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/b49bd3fd4a924214a708d764523f4bff to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/C/b49bd3fd4a924214a708d764523f4bff 2024-11-20T22:25:17,921 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/recovered.edits/446.seqid to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/recovered.edits/446.seqid 2024-11-20T22:25:17,922 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,926 DEBUG [PEWorker-1 {}] 
procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T22:25:17,928 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T22:25:17,930 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T22:25:17,937 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112001dba192a8ca4f83801bfbc33e74cda4_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112001dba192a8ca4f83801bfbc33e74cda4_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,939 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201225a9baf07d41ffad3746e083d722d7_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201225a9baf07d41ffad3746e083d722d7_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,941 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201d07a12afe0c4bd1a777d304259ca9fe_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201d07a12afe0c4bd1a777d304259ca9fe_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,942 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112020470c7c9719483687ff6a6c78f40d48_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112020470c7c9719483687ff6a6c78f40d48_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,943 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202f629eca87eb4c85906a46f11d1f7a8a_bcb32e1c322c877c5cddb0d2b8bcab6a to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202f629eca87eb4c85906a46f11d1f7a8a_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,945 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120312e2a1f6b5f4d4691d39f02286c566a_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120312e2a1f6b5f4d4691d39f02286c566a_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,947 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120344b5bae559441818650bfae1e0801f6_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120344b5bae559441818650bfae1e0801f6_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,948 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112044a830d7b97b4f30b29d72419688ecb3_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112044a830d7b97b4f30b29d72419688ecb3_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,949 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120482073ac44aa47beaac7ed641be24201_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120482073ac44aa47beaac7ed641be24201_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,950 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120606801d675d14bb18fb490107c03d8f2_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120606801d675d14bb18fb490107c03d8f2_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,951 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120690b3f0a910246af97f2312ff943daeb_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120690b3f0a910246af97f2312ff943daeb_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,952 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207c44e30dda7d4dfbab9fcbab784fb22e_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207c44e30dda7d4dfbab9fcbab784fb22e_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,954 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209588e3f63dee414b8f394925c9a1edbd_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209588e3f63dee414b8f394925c9a1edbd_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,960 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a0b0194b35aa49ef882485ad260c61a7_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a0b0194b35aa49ef882485ad260c61a7_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,967 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a6a7989f4f71485fae706504fc499227_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a6a7989f4f71485fae706504fc499227_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,968 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a74d0d4da24240578b31854b5b0affbe_bcb32e1c322c877c5cddb0d2b8bcab6a to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a74d0d4da24240578b31854b5b0affbe_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,969 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a80ddbfce126492fbb7f40fe21cf95a9_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a80ddbfce126492fbb7f40fe21cf95a9_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,970 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120aa5ba3d8869e486eb418d09d3a77e611_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120aa5ba3d8869e486eb418d09d3a77e611_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,975 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cadb62c082ca4f12b6b806765318c58f_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cadb62c082ca4f12b6b806765318c58f_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,976 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ce6f9795f175412094550a2d736466f1_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ce6f9795f175412094550a2d736466f1_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,977 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e8445944f2e244f2b39e97de3ba4290e_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e8445944f2e244f2b39e97de3ba4290e_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,978 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120eb161503a6ed45c2adcecba8617328b0_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120eb161503a6ed45c2adcecba8617328b0_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,990 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fb02584d023a40088d68891f0517e0fb_bcb32e1c322c877c5cddb0d2b8bcab6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fb02584d023a40088d68891f0517e0fb_bcb32e1c322c877c5cddb0d2b8bcab6a 2024-11-20T22:25:17,995 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T22:25:18,006 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=124, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:18,008 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T22:25:18,021 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T22:25:18,022 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=124, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:18,022 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T22:25:18,023 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732141518022"}]},"ts":"9223372036854775807"} 2024-11-20T22:25:18,028 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T22:25:18,028 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => bcb32e1c322c877c5cddb0d2b8bcab6a, NAME => 'TestAcidGuarantees,,1732141490095.bcb32e1c322c877c5cddb0d2b8bcab6a.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T22:25:18,028 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
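Note on the HFileArchiver records above: every move follows one path convention, a store file under <root>/data/default/<table>/<region>/<cf>/ is relocated to the mirrored location under <root>/archive/data/default/<table>/<region>/<cf>/. As a reading aid, here is a minimal, hypothetical Java sketch of that mapping (it assumes Hadoop's org.apache.hadoop.fs.Path on the classpath and is not HBase's own HFileArchiver implementation; the helper name is invented):

```java
// Illustrative only: a hypothetical helper showing the data/ -> archive/data/
// path mapping visible in the HFileArchiver records above. This is NOT
// HBase's own HFileArchiver code.
import org.apache.hadoop.fs.Path;

public final class ArchivePathSketch {

  /** Maps <root>/data/<relative> to <root>/archive/data/<relative>. */
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String root = rootDir.toString();
    String file = storeFile.toString();
    String prefix = root + "/data/";
    if (!file.startsWith(prefix)) {
      throw new IllegalArgumentException(file + " is not under " + prefix);
    }
    return new Path(new Path(rootDir, "archive/data"), file.substring(prefix.length()));
  }

  public static void main(String[] args) {
    Path root = new Path("hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a");
    Path store = new Path(root,
        "data/default/TestAcidGuarantees/bcb32e1c322c877c5cddb0d2b8bcab6a/B/768bae251c944c008a10e53c40b164c5");
    // Prints the same archive location that appears in the corresponding log record above.
    System.out.println(toArchivePath(root, store));
  }
}
```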
2024-11-20T22:25:18,028 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732141518028"}]},"ts":"9223372036854775807"} 2024-11-20T22:25:18,032 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T22:25:18,084 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=124, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:18,085 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 312 msec 2024-11-20T22:25:18,088 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T22:25:18,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T22:25:18,101 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-11-20T22:25:18,127 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=240 (was 239) - Thread LEAK? -, OpenFileDescriptor=461 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1037 (was 1069), ProcessCount=11 (was 11), AvailableMemoryMB=2548 (was 1948) - AvailableMemoryMB LEAK? - 2024-11-20T22:25:18,142 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=240, OpenFileDescriptor=461, MaxFileDescriptor=1048576, SystemLoadAverage=1037, ProcessCount=11, AvailableMemoryMB=2546 2024-11-20T22:25:18,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
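The TableDescriptorChecker warning just above fires because the new table's MEMSTORE_FLUSHSIZE is 131072 bytes, far below the usual 128 MB default, which is what a test that wants very frequent flushes asks for. A hedged sketch of how such a per-table flush size is set on a descriptor (illustrative helper, not the test's own code):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushSizeSketch {
    // Builds a descriptor carrying the 131072-byte MEMSTORE_FLUSHSIZE that the checker warns about.
    static TableDescriptor tinyFlushSizeDescriptor() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setMemStoreFlushSize(131072L)   // per-table override; the usual default is 128 MB
            .build();
    }
}

The same limit can instead be applied cluster-wide through hbase.hregion.memstore.flush.size, the configuration key named in the warning.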
2024-11-20T22:25:18,144 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T22:25:18,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=125, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:18,146 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T22:25:18,146 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:18,146 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 125 2024-11-20T22:25:18,147 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T22:25:18,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-20T22:25:18,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742279_1455 (size=960) 2024-11-20T22:25:18,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-20T22:25:18,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-20T22:25:18,567 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a 2024-11-20T22:25:18,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742280_1456 (size=53) 2024-11-20T22:25:18,572 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:25:18,572 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 6f10e22f8d7346b15976f24fa4b38050, disabling compactions & flushes 2024-11-20T22:25:18,572 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:18,572 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:18,572 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. after waiting 0 ms 2024-11-20T22:25:18,572 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:18,572 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:18,572 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:18,574 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T22:25:18,574 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732141518574"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732141518574"}]},"ts":"1732141518574"} 2024-11-20T22:25:18,575 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
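The create request logged above declares three identical column families (A, B and C, one version each) plus the table attribute hbase.hregion.compacting.memstore.type = BASIC, which is why every store later opens with a CompactingMemStore. A minimal sketch of issuing an equivalent create through the Admin API follows; it is illustrative only, not the test's own code.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
    public static void main(String[] args) throws IOException {
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // table-level attribute from the create request above
                .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
        for (String family : new String[] {"A", "B", "C"}) {
            builder.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)                                     // VERSIONS => '1'
                .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)   // per-family form of the same setting
                .build());
        }
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            admin.createTable(builder.build());   // master runs CreateTableProcedure (pid=125 in this run)
        }
    }
}

createTable blocks on the CreateTableProcedure, so control returns to the caller only after the region below has been assigned and opened.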
2024-11-20T22:25:18,575 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T22:25:18,575 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141518575"}]},"ts":"1732141518575"} 2024-11-20T22:25:18,576 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T22:25:18,636 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6f10e22f8d7346b15976f24fa4b38050, ASSIGN}] 2024-11-20T22:25:18,638 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6f10e22f8d7346b15976f24fa4b38050, ASSIGN 2024-11-20T22:25:18,639 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=6f10e22f8d7346b15976f24fa4b38050, ASSIGN; state=OFFLINE, location=6365a1e51efd,44631,1732141399950; forceNewPlan=false, retain=false 2024-11-20T22:25:18,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-20T22:25:18,789 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=6f10e22f8d7346b15976f24fa4b38050, regionState=OPENING, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:18,791 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; OpenRegionProcedure 6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950}] 2024-11-20T22:25:18,943 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:18,946 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
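The repeated "Checking to see if procedure is done pid=125" lines are the client-side future behind that blocking create call polling the master until the assignment above completes. If a caller only has the table name, a comparable wait can be expressed against the Admin API; a rough sketch, with the timeout chosen arbitrarily:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class WaitForTableSketch {
    // Polls until every region of the table is assigned and open, or the timeout elapses.
    static void waitUntilAvailable(Admin admin, TableName table, long timeoutMs) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!admin.isTableAvailable(table)) {
            if (System.currentTimeMillis() > deadline) {
                throw new IllegalStateException(table + " still not available after " + timeoutMs + " ms");
            }
            Thread.sleep(100);   // the real client future also pauses between its "is procedure done" calls
        }
    }
}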
2024-11-20T22:25:18,946 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(7285): Opening region: {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:25:18,946 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:18,946 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:25:18,947 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(7327): checking encryption for 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:18,947 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(7330): checking classloading for 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:18,948 INFO [StoreOpener-6f10e22f8d7346b15976f24fa4b38050-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:18,949 INFO [StoreOpener-6f10e22f8d7346b15976f24fa4b38050-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:25:18,949 INFO [StoreOpener-6f10e22f8d7346b15976f24fa4b38050-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6f10e22f8d7346b15976f24fa4b38050 columnFamilyName A 2024-11-20T22:25:18,949 DEBUG [StoreOpener-6f10e22f8d7346b15976f24fa4b38050-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:18,950 INFO [StoreOpener-6f10e22f8d7346b15976f24fa4b38050-1 {}] regionserver.HStore(327): Store=6f10e22f8d7346b15976f24fa4b38050/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:25:18,950 INFO [StoreOpener-6f10e22f8d7346b15976f24fa4b38050-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:18,952 INFO [StoreOpener-6f10e22f8d7346b15976f24fa4b38050-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:25:18,953 INFO [StoreOpener-6f10e22f8d7346b15976f24fa4b38050-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6f10e22f8d7346b15976f24fa4b38050 columnFamilyName B 2024-11-20T22:25:18,953 DEBUG [StoreOpener-6f10e22f8d7346b15976f24fa4b38050-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:18,953 INFO [StoreOpener-6f10e22f8d7346b15976f24fa4b38050-1 {}] regionserver.HStore(327): Store=6f10e22f8d7346b15976f24fa4b38050/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:25:18,953 INFO [StoreOpener-6f10e22f8d7346b15976f24fa4b38050-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:18,957 INFO [StoreOpener-6f10e22f8d7346b15976f24fa4b38050-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:25:18,958 INFO [StoreOpener-6f10e22f8d7346b15976f24fa4b38050-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6f10e22f8d7346b15976f24fa4b38050 columnFamilyName C 2024-11-20T22:25:18,958 DEBUG [StoreOpener-6f10e22f8d7346b15976f24fa4b38050-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:18,958 INFO [StoreOpener-6f10e22f8d7346b15976f24fa4b38050-1 {}] regionserver.HStore(327): Store=6f10e22f8d7346b15976f24fa4b38050/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:25:18,958 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:18,959 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:18,960 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:18,973 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T22:25:18,982 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1085): writing seq id for 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:19,001 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T22:25:19,002 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1102): Opened 6f10e22f8d7346b15976f24fa4b38050; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65149444, jitterRate=-0.029197633266448975}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T22:25:19,002 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1001): Region open journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:19,004 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., pid=127, masterSystemTime=1732141518943 2024-11-20T22:25:19,006 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:19,006 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
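During the open above, the flush policy notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the descriptor and falls back to the region flush size divided by the number of families (16.0 M here). If an explicit per-family lower bound were wanted, it would go into the descriptor as a plain key/value pair; a hedged sketch:

import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushPolicySketch {
    // Adds an explicit per-column-family flush lower bound to an existing descriptor; without it,
    // FlushLargeStoresPolicy divides the region flush size by the number of families, as logged above.
    static TableDescriptor withPerFamilyLowerBound(TableDescriptor base, long bytes) {
        return TableDescriptorBuilder.newBuilder(base)
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound", Long.toString(bytes))
            .build();
    }
}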
2024-11-20T22:25:19,007 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=6f10e22f8d7346b15976f24fa4b38050, regionState=OPEN, openSeqNum=2, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:19,009 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-11-20T22:25:19,010 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; OpenRegionProcedure 6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 in 217 msec 2024-11-20T22:25:19,011 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=126, resume processing ppid=125 2024-11-20T22:25:19,011 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, ppid=125, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6f10e22f8d7346b15976f24fa4b38050, ASSIGN in 374 msec 2024-11-20T22:25:19,011 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T22:25:19,012 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141519012"}]},"ts":"1732141519012"} 2024-11-20T22:25:19,013 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T22:25:19,026 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T22:25:19,027 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 882 msec 2024-11-20T22:25:19,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-20T22:25:19,254 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 125 completed 2024-11-20T22:25:19,255 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x59daaa82 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2aaa8c4a 2024-11-20T22:25:19,347 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34614bf6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:19,374 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:19,377 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43972, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:19,387 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T22:25:19,389 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51832, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T22:25:19,394 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x38dd8644 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@466b85c9 2024-11-20T22:25:19,431 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@786b5809, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:19,432 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x65e17c26 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7f3ee89e 2024-11-20T22:25:19,459 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d375c60, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:19,460 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53fc02ba to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5b0e6a43 2024-11-20T22:25:19,476 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cbdf91e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:19,477 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2011d733 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8e5fd00 2024-11-20T22:25:19,514 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bc3900b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:19,515 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x39b3baa5 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1e195d6e 2024-11-20T22:25:19,526 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@599dd56e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:19,527 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x14088aa9 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@23090be3 2024-11-20T22:25:19,542 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@224e54da, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:19,544 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x40302925 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b8d64d3 2024-11-20T22:25:19,563 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bb51dfc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:19,564 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x47ef9951 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@784d683 2024-11-20T22:25:19,584 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@540d7172, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:19,585 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x567011a8 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7761f52b 2024-11-20T22:25:19,625 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48588c54, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:19,626 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x02430fee to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a736a20 2024-11-20T22:25:19,676 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76c56316, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:19,680 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:19,680 DEBUG [hconnection-0x684065f9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:19,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-11-20T22:25:19,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T22:25:19,681 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, 
table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:19,682 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43988, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:19,682 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:19,682 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:19,687 DEBUG [hconnection-0x2efb80ba-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:19,687 DEBUG [hconnection-0x4d904c72-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:19,689 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44004, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:19,690 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44008, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:19,691 DEBUG [hconnection-0x73af8a0e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:19,691 DEBUG [hconnection-0xf7815fc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:19,692 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44022, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:19,692 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44036, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:19,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:19,694 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:25:19,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:19,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:19,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:19,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:19,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:19,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:19,699 DEBUG 
[hconnection-0x6eea4e57-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:19,699 DEBUG [hconnection-0x2889d722-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:19,700 DEBUG [hconnection-0x4964f417-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:19,700 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44052, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:19,700 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44054, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:19,701 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44068, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:19,715 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141579713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:19,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141579713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:19,716 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141579715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:19,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141579715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:19,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141579716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:19,735 DEBUG [hconnection-0x1fe11fbd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:19,737 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44080, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:19,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/2bcf51c50fa0460f82e816b716e71721 is 50, key is test_row_0/A:col10/1732141519693/Put/seqid=0 2024-11-20T22:25:19,777 DEBUG [hconnection-0x59d55e13-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:19,779 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44090, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:19,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T22:25:19,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742281_1457 (size=12001) 2024-11-20T22:25:19,800 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/2bcf51c50fa0460f82e816b716e71721 2024-11-20T22:25:19,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141579817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:19,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141579817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:19,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141579817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:19,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141579817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:19,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141579817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:19,834 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:19,834 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-20T22:25:19,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:19,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:19,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:19,835 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:19,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:19,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:19,841 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/1d9d2645985144e088f9af84c4466357 is 50, key is test_row_0/B:col10/1732141519693/Put/seqid=0 2024-11-20T22:25:19,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742282_1458 (size=12001) 2024-11-20T22:25:19,989 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:19,990 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-20T22:25:19,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T22:25:19,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:19,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:19,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
2024-11-20T22:25:19,991 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:19,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:19,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:20,020 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141580018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:20,020 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,020 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141580020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:20,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141580020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:20,021 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141580021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:20,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141580021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:20,150 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:20,151 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-20T22:25:20,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:20,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:20,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:20,151 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:20,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:20,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:20,283 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/1d9d2645985144e088f9af84c4466357 2024-11-20T22:25:20,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T22:25:20,303 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:20,303 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-20T22:25:20,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:20,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:20,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
2024-11-20T22:25:20,304 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:20,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:20,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:20,327 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141580323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:20,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141580323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:20,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141580325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:20,329 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141580326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:20,329 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141580326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:20,331 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/de49e1443bc24a6c967ef314478faf0a is 50, key is test_row_0/C:col10/1732141519693/Put/seqid=0 2024-11-20T22:25:20,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742283_1459 (size=12001) 2024-11-20T22:25:20,351 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/de49e1443bc24a6c967ef314478faf0a 2024-11-20T22:25:20,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/2bcf51c50fa0460f82e816b716e71721 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/2bcf51c50fa0460f82e816b716e71721 2024-11-20T22:25:20,364 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/2bcf51c50fa0460f82e816b716e71721, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T22:25:20,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/1d9d2645985144e088f9af84c4466357 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/1d9d2645985144e088f9af84c4466357 2024-11-20T22:25:20,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/1d9d2645985144e088f9af84c4466357, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T22:25:20,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/de49e1443bc24a6c967ef314478faf0a as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/de49e1443bc24a6c967ef314478faf0a 2024-11-20T22:25:20,399 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/de49e1443bc24a6c967ef314478faf0a, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T22:25:20,401 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for 6f10e22f8d7346b15976f24fa4b38050 in 707ms, sequenceid=13, compaction requested=false 2024-11-20T22:25:20,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:20,456 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:20,457 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-20T22:25:20,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
2024-11-20T22:25:20,457 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T22:25:20,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:20,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:20,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:20,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:20,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:20,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:20,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/abad919c10c34f2493d0a29a7bc6772b is 50, key is test_row_0/A:col10/1732141519711/Put/seqid=0 2024-11-20T22:25:20,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742284_1460 (size=12001) 2024-11-20T22:25:20,497 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/abad919c10c34f2493d0a29a7bc6772b 2024-11-20T22:25:20,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/5ebdf9c9d7f64f0184af529b5bb1aff8 is 50, key is test_row_0/B:col10/1732141519711/Put/seqid=0 2024-11-20T22:25:20,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742285_1461 (size=12001) 2024-11-20T22:25:20,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T22:25:20,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
as already flushing 2024-11-20T22:25:20,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:20,857 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141580847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:20,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141580849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:20,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141580851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:20,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141580851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:20,863 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141580854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:20,948 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/5ebdf9c9d7f64f0184af529b5bb1aff8 2024-11-20T22:25:20,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/9c8c9887614c4699b7b959ca88da0a95 is 50, key is test_row_0/C:col10/1732141519711/Put/seqid=0 2024-11-20T22:25:20,974 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141580961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:20,974 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141580962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:20,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141580963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:20,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141580965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:20,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141580961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:21,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742286_1462 (size=12001) 2024-11-20T22:25:21,031 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/9c8c9887614c4699b7b959ca88da0a95 2024-11-20T22:25:21,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/abad919c10c34f2493d0a29a7bc6772b as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/abad919c10c34f2493d0a29a7bc6772b 2024-11-20T22:25:21,039 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/abad919c10c34f2493d0a29a7bc6772b, entries=150, sequenceid=39, filesize=11.7 K 2024-11-20T22:25:21,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/5ebdf9c9d7f64f0184af529b5bb1aff8 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/5ebdf9c9d7f64f0184af529b5bb1aff8 2024-11-20T22:25:21,053 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/5ebdf9c9d7f64f0184af529b5bb1aff8, entries=150, sequenceid=39, filesize=11.7 K 2024-11-20T22:25:21,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/9c8c9887614c4699b7b959ca88da0a95 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/9c8c9887614c4699b7b959ca88da0a95 2024-11-20T22:25:21,062 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/9c8c9887614c4699b7b959ca88da0a95, entries=150, sequenceid=39, filesize=11.7 K 2024-11-20T22:25:21,063 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 6f10e22f8d7346b15976f24fa4b38050 in 606ms, sequenceid=39, compaction requested=false 2024-11-20T22:25:21,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:21,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
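The burst of RegionTooBusyException warnings above is the region server refusing writes while the memstore of region 6f10e22f8d7346b15976f24fa4b38050 sits over its blocking limit (512.0 K here, a deliberately small limit for this test) until the flush that finishes at 22:25:21,063 frees space. The exception is generally treated as retryable and the stock HBase client already retries it internally; the sketch below is only a minimal illustration of that backoff-and-retry idea, not the client's actual retry plumbing. It reuses the table, row, family and qualifier names visible in the log; the retry count and sleep values are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row key, family and qualifier as they appear in the flush output above.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100L; // illustrative starting backoff, not a value from the test
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          // In the scenario logged above, the server answers such puts with
          // RegionTooBusyException while the memstore is over its limit.
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          // The region is busy flushing; back off and try again instead of failing the write.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}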
2024-11-20T22:25:21,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-11-20T22:25:21,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-11-20T22:25:21,069 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-20T22:25:21,069 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3860 sec 2024-11-20T22:25:21,071 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 1.3900 sec 2024-11-20T22:25:21,191 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T22:25:21,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:21,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:21,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:21,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:21,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:21,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:21,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:21,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/a3fca6c0ef334e4cbfb4e1c1dcb217a0 is 50, key is test_row_0/A:col10/1732141520851/Put/seqid=0 2024-11-20T22:25:21,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742287_1463 (size=12001) 2024-11-20T22:25:21,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141581255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:21,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141581259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:21,280 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141581266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:21,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141581268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:21,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141581269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:21,390 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141581378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:21,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141581382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:21,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141581382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:21,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141581385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:21,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141581388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:21,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141581600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:21,619 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141581602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:21,619 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141581603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:21,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141581603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:21,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141581622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:21,660 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/a3fca6c0ef334e4cbfb4e1c1dcb217a0 2024-11-20T22:25:21,683 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/68a2fe6c15974c1dbdd92fce72207d42 is 50, key is test_row_0/B:col10/1732141520851/Put/seqid=0 2024-11-20T22:25:21,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742288_1464 (size=12001) 2024-11-20T22:25:21,724 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/68a2fe6c15974c1dbdd92fce72207d42 2024-11-20T22:25:21,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/2e9047ab207f4810bc440cbab76f7c10 is 50, key is test_row_0/C:col10/1732141520851/Put/seqid=0 2024-11-20T22:25:21,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742289_1465 (size=12001) 2024-11-20T22:25:21,778 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/2e9047ab207f4810bc440cbab76f7c10 2024-11-20T22:25:21,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/a3fca6c0ef334e4cbfb4e1c1dcb217a0 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/a3fca6c0ef334e4cbfb4e1c1dcb217a0 2024-11-20T22:25:21,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T22:25:21,795 INFO [Thread-2042 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-11-20T22:25:21,798 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:21,798 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/a3fca6c0ef334e4cbfb4e1c1dcb217a0, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T22:25:21,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-11-20T22:25:21,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T22:25:21,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/68a2fe6c15974c1dbdd92fce72207d42 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/68a2fe6c15974c1dbdd92fce72207d42 2024-11-20T22:25:21,799 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:21,800 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:21,800 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:21,806 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/68a2fe6c15974c1dbdd92fce72207d42, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T22:25:21,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/2e9047ab207f4810bc440cbab76f7c10 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/2e9047ab207f4810bc440cbab76f7c10 2024-11-20T22:25:21,812 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/2e9047ab207f4810bc440cbab76f7c10, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T22:25:21,814 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 6f10e22f8d7346b15976f24fa4b38050 in 622ms, sequenceid=51, compaction requested=true 2024-11-20T22:25:21,814 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:21,814 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:21,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:21,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:21,814 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:21,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:21,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:21,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:21,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:21,815 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:21,815 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/A is initiating minor compaction 
(all files) 2024-11-20T22:25:21,815 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/A in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:21,815 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/2bcf51c50fa0460f82e816b716e71721, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/abad919c10c34f2493d0a29a7bc6772b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/a3fca6c0ef334e4cbfb4e1c1dcb217a0] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=35.2 K 2024-11-20T22:25:21,815 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:21,815 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 2bcf51c50fa0460f82e816b716e71721, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732141519692 2024-11-20T22:25:21,816 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/B is initiating minor compaction (all files) 2024-11-20T22:25:21,816 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/B in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
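The flush above left three HFiles in each store, and the ExploringCompactionPolicy immediately selects all three for the system-requested minor compaction of A and B recorded here ("Because: MemStoreFlusher.0"). The same work can also be requested explicitly from a client through the public Admin API; the following is a minimal sketch, assuming a standard HBase 2.x client on the classpath and reusing the table name from this log (the polling interval and connection setup are illustrative, not taken from the test).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompactionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();          // picks up hbase-site.xml from the classpath
        TableName table = TableName.valueOf("TestAcidGuarantees"); // table name as in the log
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.compact(table);                                    // request a (minor) compaction of every region
          // Poll until the servers report no compaction in progress;
          // the 500 ms interval is an arbitrary choice for this sketch.
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(500);
          }
        }
      }
    }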
2024-11-20T22:25:21,816 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/1d9d2645985144e088f9af84c4466357, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/5ebdf9c9d7f64f0184af529b5bb1aff8, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/68a2fe6c15974c1dbdd92fce72207d42] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=35.2 K 2024-11-20T22:25:21,816 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting abad919c10c34f2493d0a29a7bc6772b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732141519711 2024-11-20T22:25:21,816 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d9d2645985144e088f9af84c4466357, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732141519692 2024-11-20T22:25:21,817 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5ebdf9c9d7f64f0184af529b5bb1aff8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732141519711 2024-11-20T22:25:21,817 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting a3fca6c0ef334e4cbfb4e1c1dcb217a0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732141520841 2024-11-20T22:25:21,818 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 68a2fe6c15974c1dbdd92fce72207d42, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732141520841 2024-11-20T22:25:21,827 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#A#compaction#381 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:21,828 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/17bc300bc24d4e2e8dc6ed5a249d99c1 is 50, key is test_row_0/A:col10/1732141520851/Put/seqid=0 2024-11-20T22:25:21,834 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#B#compaction#382 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:21,834 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/27a1acff636149df902fc703c6740a0a is 50, key is test_row_0/B:col10/1732141520851/Put/seqid=0 2024-11-20T22:25:21,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742290_1466 (size=12104) 2024-11-20T22:25:21,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742291_1467 (size=12104) 2024-11-20T22:25:21,864 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/17bc300bc24d4e2e8dc6ed5a249d99c1 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/17bc300bc24d4e2e8dc6ed5a249d99c1 2024-11-20T22:25:21,869 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/A of 6f10e22f8d7346b15976f24fa4b38050 into 17bc300bc24d4e2e8dc6ed5a249d99c1(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:21,869 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:21,869 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/A, priority=13, startTime=1732141521814; duration=0sec 2024-11-20T22:25:21,869 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:21,869 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:A 2024-11-20T22:25:21,869 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:21,870 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:21,870 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/C is initiating minor compaction (all files) 2024-11-20T22:25:21,870 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/C in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
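Selection fires here because each store has accumulated three flushed files, which matches the default minor-compaction minimum, and the same selection lines report the 16-file blocking threshold; the PressureAwareThroughputController additionally caps compaction I/O, reported above as a 50.00 MB/second total limit. A hedged sketch of the corresponding server-side knobs follows; the property names are standard HBase configuration keys, but the values shown are placeholders rather than the ones this test runs with, and in a real deployment they belong in hbase-site.xml on the region servers rather than in client code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum/maximum number of store files considered for one minor compaction.
        conf.setInt("hbase.hstore.compaction.min", 3);      // default 3, matching the 3 eligible files above
        conf.setInt("hbase.hstore.compaction.max", 10);     // default 10
        // Flushes are delayed once a store reaches this many files.
        conf.setInt("hbase.hstore.blockingStoreFiles", 16); // the "16 blocking" in the selection lines above
        // Placeholder values for illustration only; pass conf to ConnectionFactory or,
        // for server-side effect, set the same keys in hbase-site.xml.
      }
    }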
2024-11-20T22:25:21,870 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/de49e1443bc24a6c967ef314478faf0a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/9c8c9887614c4699b7b959ca88da0a95, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/2e9047ab207f4810bc440cbab76f7c10] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=35.2 K 2024-11-20T22:25:21,871 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting de49e1443bc24a6c967ef314478faf0a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732141519692 2024-11-20T22:25:21,872 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c8c9887614c4699b7b959ca88da0a95, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732141519711 2024-11-20T22:25:21,872 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e9047ab207f4810bc440cbab76f7c10, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732141520841 2024-11-20T22:25:21,881 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#C#compaction#383 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:21,882 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/bb1909c36c2344b49eddd48a8f5dc609 is 50, key is test_row_0/C:col10/1732141520851/Put/seqid=0 2024-11-20T22:25:21,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T22:25:21,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742292_1468 (size=12104) 2024-11-20T22:25:21,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:21,928 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:25:21,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:21,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:21,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:21,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:21,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:21,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:21,947 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/6ce23a98c6dc40d9858b6476794d38b7 is 50, key is test_row_0/A:col10/1732141521249/Put/seqid=0 2024-11-20T22:25:21,953 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:21,953 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T22:25:21,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:21,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:21,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
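While the MemStoreFlusher is still writing out the 147.60 KB snapshot above, the master dispatches FlushRegionCallable for pid=131; the region reports "NOT flushing ... as already flushing", the callable fails with the IOException below, and the master re-dispatches it until the region becomes flushable again (the same pid=131 is executed again further down). From the client's side, the whole exchange sits behind one blocking Admin call; a minimal sketch, assuming a standard HBase 2.x client and the table name used by this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Submits a flush procedure on the master and blocks until it finishes,
          // i.e. the same "Operation: FLUSH ... completed" path seen for procId 128 above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }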
2024-11-20T22:25:21,954 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:21,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:21,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:21,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141581952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:21,974 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141581959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:21,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141581960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:21,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742293_1469 (size=14341) 2024-11-20T22:25:21,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141581975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:21,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141581972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,040 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/2bcf51c50fa0460f82e816b716e71721, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/abad919c10c34f2493d0a29a7bc6772b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/a3fca6c0ef334e4cbfb4e1c1dcb217a0] to archive 2024-11-20T22:25:22,044 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
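The RegionTooBusyException warnings above and below come from checkResources() rejecting new mutations while the region's memstore is over the 512.0 K blocking limit reported in the exception; writers are expected to back off until the in-flight flush drains it. The stock HBase client retries this internally, but a hand-rolled loop would look roughly like the sketch below (the column family value, payload, and backoff numbers are illustrative assumptions; only the table name and the row/column seen in the flush entries come from the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryOnBusyRegionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))   // row key as seen in the flush entries above
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;                            // illustrative starting backoff
          for (int attempt = 0; attempt < 10; attempt++) {
            try {
              table.put(put);
              return;                                      // write accepted
            } catch (RegionTooBusyException e) {
              // Memstore is over its blocking limit; wait for the flush to drain it, then retry.
              Thread.sleep(backoffMs);
              backoffMs = Math.min(backoffMs * 2, 5_000);
            }
          }
          throw new RuntimeException("region stayed too busy after 10 attempts");
        }
      }
    }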
2024-11-20T22:25:22,050 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/2bcf51c50fa0460f82e816b716e71721 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/2bcf51c50fa0460f82e816b716e71721 2024-11-20T22:25:22,053 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/abad919c10c34f2493d0a29a7bc6772b to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/abad919c10c34f2493d0a29a7bc6772b 2024-11-20T22:25:22,054 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/a3fca6c0ef334e4cbfb4e1c1dcb217a0 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/a3fca6c0ef334e4cbfb4e1c1dcb217a0 2024-11-20T22:25:22,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141582075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141582076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141582078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,099 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141582090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141582093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T22:25:22,108 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,109 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T22:25:22,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:22,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:22,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:22,109 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:22,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:22,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:22,267 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,267 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T22:25:22,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:22,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:22,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:22,267 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:22,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:22,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:22,270 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/27a1acff636149df902fc703c6740a0a as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/27a1acff636149df902fc703c6740a0a 2024-11-20T22:25:22,280 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/B of 6f10e22f8d7346b15976f24fa4b38050 into 27a1acff636149df902fc703c6740a0a(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:22,280 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:22,280 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/B, priority=13, startTime=1732141521814; duration=0sec 2024-11-20T22:25:22,280 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:22,280 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:B 2024-11-20T22:25:22,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141582284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141582288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141582288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,311 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141582302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141582302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,354 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/bb1909c36c2344b49eddd48a8f5dc609 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/bb1909c36c2344b49eddd48a8f5dc609 2024-11-20T22:25:22,385 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/6ce23a98c6dc40d9858b6476794d38b7 2024-11-20T22:25:22,396 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/C of 6f10e22f8d7346b15976f24fa4b38050 into bb1909c36c2344b49eddd48a8f5dc609(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
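The "Over memstore limit=512.0 K" rejections in this log come from HRegion.checkResources blocking writes once a region's memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. A 512 K ceiling points to a deliberately tiny flush size for the test; the sketch below shows one plausible way such a limit could be configured, where the 128 KB flush size and the multiplier of 4 are assumptions chosen to reproduce 512 K, not values read from this test's source.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreLimitConfig {
        public static Configuration create() {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore once it reaches 128 KB (hypothetical test
            // value; the production default is 128 MB).
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            // Block new writes with RegionTooBusyException once the memstore grows
            // past flush.size * multiplier, i.e. 4 * 128 KB = 512 KB -- the same
            // "Over memstore limit=512.0 K" threshold seen in the warnings above.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            return conf;
        }
    }

Under settings like these, a burst of puts can fill the memstore faster than MemStoreFlusher drains it, which is consistent with the RegionTooBusyException churn throughout this section.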
2024-11-20T22:25:22,396 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:22,396 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/C, priority=13, startTime=1732141521815; duration=0sec 2024-11-20T22:25:22,396 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:22,396 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:C 2024-11-20T22:25:22,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T22:25:22,408 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/5f5347cd339540ad8d8de3ba20b66985 is 50, key is test_row_0/B:col10/1732141521249/Put/seqid=0 2024-11-20T22:25:22,436 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,436 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T22:25:22,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:22,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:22,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:22,439 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:22,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:22,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:22,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742294_1470 (size=12001) 2024-11-20T22:25:22,450 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/5f5347cd339540ad8d8de3ba20b66985 2024-11-20T22:25:22,504 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/0df9ed5d562d446db72eb7d3a578100f is 50, key is test_row_0/C:col10/1732141521249/Put/seqid=0 2024-11-20T22:25:22,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742295_1471 (size=12001) 2024-11-20T22:25:22,557 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/0df9ed5d562d446db72eb7d3a578100f 2024-11-20T22:25:22,564 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/6ce23a98c6dc40d9858b6476794d38b7 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6ce23a98c6dc40d9858b6476794d38b7 2024-11-20T22:25:22,580 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6ce23a98c6dc40d9858b6476794d38b7, entries=200, sequenceid=77, filesize=14.0 K 2024-11-20T22:25:22,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/5f5347cd339540ad8d8de3ba20b66985 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/5f5347cd339540ad8d8de3ba20b66985 2024-11-20T22:25:22,593 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/5f5347cd339540ad8d8de3ba20b66985, entries=150, sequenceid=77, filesize=11.7 K 2024-11-20T22:25:22,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/0df9ed5d562d446db72eb7d3a578100f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0df9ed5d562d446db72eb7d3a578100f 2024-11-20T22:25:22,599 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,600 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0df9ed5d562d446db72eb7d3a578100f, entries=150, sequenceid=77, filesize=11.7 K 2024-11-20T22:25:22,603 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T22:25:22,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:22,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:22,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:22,603 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
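The repeated pid=131 failures ("Unable to complete flush ... as already flushing") show the master re-dispatching a region-flush procedure while MemStoreFlusher still owns the in-progress flush; each attempt fails with the IOException above until that flush completes. A flush request of this kind is typically issued through the Admin API; the sketch below is a hypothetical client-side trigger for the TestAcidGuarantees table and is not taken from the test source.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn =
                     ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Asks for a flush of every region of the table; in this build the
                // request is driven by a master procedure (compare the pid=130/131
                // entries), and a region that is already flushing rejects the remote
                // callable until its current flush finishes.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }

Whether the retries succeed here depends on the in-flight flush finishing, which the later "Finished flush of dataSize ~147.60 KB ... in 683ms, sequenceid=77" entry shows happening.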
2024-11-20T22:25:22,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:22,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:22,611 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 6f10e22f8d7346b15976f24fa4b38050 in 683ms, sequenceid=77, compaction requested=false 2024-11-20T22:25:22,611 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:22,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:22,612 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T22:25:22,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:22,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:22,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:22,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:22,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:22,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:22,631 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/6abece6dbcc54d7ea52aae28a9129022 is 50, key is test_row_0/A:col10/1732141521965/Put/seqid=0 2024-11-20T22:25:22,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742296_1472 (size=11997) 2024-11-20T22:25:22,668 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/6abece6dbcc54d7ea52aae28a9129022 2024-11-20T22:25:22,691 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/c8eb15a5e20a479693c37c68a9bd86e5 is 50, key is test_row_0/B:col10/1732141521965/Put/seqid=0 2024-11-20T22:25:22,708 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141582687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141582675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742297_1473 (size=9657) 2024-11-20T22:25:22,728 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/c8eb15a5e20a479693c37c68a9bd86e5 2024-11-20T22:25:22,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141582708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141582708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141582708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,748 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/a72fcd91c61f4559a59c5534ae03c6dc is 50, key is test_row_0/C:col10/1732141521965/Put/seqid=0 2024-11-20T22:25:22,767 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,767 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T22:25:22,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:22,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:22,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:22,771 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:22,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:22,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:22,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742298_1474 (size=9657) 2024-11-20T22:25:22,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141582813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141582815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141582838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141582838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,846 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141582838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T22:25:22,924 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:22,925 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T22:25:22,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:22,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:22,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:22,925 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:22,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:22,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:23,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141583031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:23,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141583034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:23,051 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T22:25:23,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141583046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:23,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141583047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:23,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141583051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:23,077 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:23,078 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T22:25:23,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:23,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:23,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:23,078 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:23,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:23,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:23,189 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/a72fcd91c61f4559a59c5534ae03c6dc 2024-11-20T22:25:23,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/6abece6dbcc54d7ea52aae28a9129022 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6abece6dbcc54d7ea52aae28a9129022 2024-11-20T22:25:23,216 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6abece6dbcc54d7ea52aae28a9129022, entries=150, sequenceid=92, filesize=11.7 K 2024-11-20T22:25:23,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/c8eb15a5e20a479693c37c68a9bd86e5 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/c8eb15a5e20a479693c37c68a9bd86e5 2024-11-20T22:25:23,220 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/c8eb15a5e20a479693c37c68a9bd86e5, entries=100, sequenceid=92, 
filesize=9.4 K 2024-11-20T22:25:23,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/a72fcd91c61f4559a59c5534ae03c6dc as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/a72fcd91c61f4559a59c5534ae03c6dc 2024-11-20T22:25:23,228 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/a72fcd91c61f4559a59c5534ae03c6dc, entries=100, sequenceid=92, filesize=9.4 K 2024-11-20T22:25:23,229 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 6f10e22f8d7346b15976f24fa4b38050 in 617ms, sequenceid=92, compaction requested=true 2024-11-20T22:25:23,229 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:23,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:23,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:23,229 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:23,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:23,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:23,229 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:23,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:23,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:23,230 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:23,230 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/A is initiating minor compaction (all files) 2024-11-20T22:25:23,230 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/A in 
TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:23,230 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/17bc300bc24d4e2e8dc6ed5a249d99c1, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6ce23a98c6dc40d9858b6476794d38b7, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6abece6dbcc54d7ea52aae28a9129022] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=37.5 K 2024-11-20T22:25:23,230 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17bc300bc24d4e2e8dc6ed5a249d99c1, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732141520841 2024-11-20T22:25:23,231 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6ce23a98c6dc40d9858b6476794d38b7, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732141521249 2024-11-20T22:25:23,231 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6abece6dbcc54d7ea52aae28a9129022, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732141521965 2024-11-20T22:25:23,232 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:23,232 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/B is initiating minor compaction (all files) 2024-11-20T22:25:23,232 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/B in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
2024-11-20T22:25:23,232 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/27a1acff636149df902fc703c6740a0a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/5f5347cd339540ad8d8de3ba20b66985, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/c8eb15a5e20a479693c37c68a9bd86e5] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=33.0 K 2024-11-20T22:25:23,232 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 27a1acff636149df902fc703c6740a0a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732141520841 2024-11-20T22:25:23,232 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f5347cd339540ad8d8de3ba20b66985, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732141521249 2024-11-20T22:25:23,233 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting c8eb15a5e20a479693c37c68a9bd86e5, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732141521965 2024-11-20T22:25:23,237 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:23,238 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T22:25:23,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
2024-11-20T22:25:23,238 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T22:25:23,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:23,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:23,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:23,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:23,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:23,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:23,259 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#A#compaction#390 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:23,259 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/20af2dc8221e40ddb22a284d81f20e4c is 50, key is test_row_0/A:col10/1732141521965/Put/seqid=0 2024-11-20T22:25:23,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/6aef765046184848935dd61868367f9b is 50, key is test_row_0/A:col10/1732141522700/Put/seqid=0 2024-11-20T22:25:23,263 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#B#compaction#392 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:23,263 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/89622cdcbd554d9ea719f9880a913082 is 50, key is test_row_0/B:col10/1732141521965/Put/seqid=0 2024-11-20T22:25:23,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742299_1475 (size=12104) 2024-11-20T22:25:23,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742301_1477 (size=12207) 2024-11-20T22:25:23,289 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/89622cdcbd554d9ea719f9880a913082 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/89622cdcbd554d9ea719f9880a913082 2024-11-20T22:25:23,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742300_1476 (size=12001) 2024-11-20T22:25:23,299 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/B of 6f10e22f8d7346b15976f24fa4b38050 into 89622cdcbd554d9ea719f9880a913082(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:23,299 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:23,299 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/B, priority=13, startTime=1732141523229; duration=0sec 2024-11-20T22:25:23,299 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:23,299 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:B 2024-11-20T22:25:23,299 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:23,299 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/6aef765046184848935dd61868367f9b 2024-11-20T22:25:23,300 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:23,300 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/C is initiating minor compaction (all files) 2024-11-20T22:25:23,300 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/C in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
2024-11-20T22:25:23,300 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/bb1909c36c2344b49eddd48a8f5dc609, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0df9ed5d562d446db72eb7d3a578100f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/a72fcd91c61f4559a59c5534ae03c6dc] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=33.0 K 2024-11-20T22:25:23,301 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting bb1909c36c2344b49eddd48a8f5dc609, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732141520841 2024-11-20T22:25:23,301 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 0df9ed5d562d446db72eb7d3a578100f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732141521249 2024-11-20T22:25:23,302 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting a72fcd91c61f4559a59c5534ae03c6dc, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732141521965 2024-11-20T22:25:23,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/a880c0a993754332b3835be1cd80c709 is 50, key is test_row_0/B:col10/1732141522700/Put/seqid=0 2024-11-20T22:25:23,324 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#C#compaction#394 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:23,324 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/8b6b7780637c4bdda5486f711f40a53d is 50, key is test_row_0/C:col10/1732141521965/Put/seqid=0 2024-11-20T22:25:23,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742302_1478 (size=12001) 2024-11-20T22:25:23,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:23,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
as already flushing 2024-11-20T22:25:23,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742303_1479 (size=12207) 2024-11-20T22:25:23,376 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/8b6b7780637c4bdda5486f711f40a53d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/8b6b7780637c4bdda5486f711f40a53d 2024-11-20T22:25:23,382 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/C of 6f10e22f8d7346b15976f24fa4b38050 into 8b6b7780637c4bdda5486f711f40a53d(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:23,382 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:23,382 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/C, priority=13, startTime=1732141523229; duration=0sec 2024-11-20T22:25:23,383 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:23,383 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:C 2024-11-20T22:25:23,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141583379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:23,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141583379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:23,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141583385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:23,396 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141583385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:23,396 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141583387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:23,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141583490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:23,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141583490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:23,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141583490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:23,501 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141583497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:23,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141583498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:23,684 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/20af2dc8221e40ddb22a284d81f20e4c as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/20af2dc8221e40ddb22a284d81f20e4c 2024-11-20T22:25:23,693 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/A of 6f10e22f8d7346b15976f24fa4b38050 into 20af2dc8221e40ddb22a284d81f20e4c(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:23,693 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:23,693 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/A, priority=13, startTime=1732141523229; duration=0sec 2024-11-20T22:25:23,693 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:23,693 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:A 2024-11-20T22:25:23,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141583698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:23,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141583699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:23,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141583700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:23,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141583704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:23,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141583705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:23,749 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/a880c0a993754332b3835be1cd80c709 2024-11-20T22:25:23,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/b6861bc5332a49a4a36accdda92149aa is 50, key is test_row_0/C:col10/1732141522700/Put/seqid=0 2024-11-20T22:25:23,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742304_1480 (size=12001) 2024-11-20T22:25:23,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T22:25:24,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141584014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:24,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141584015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:24,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141584020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:24,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141584020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:24,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141584023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:24,240 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/b6861bc5332a49a4a36accdda92149aa 2024-11-20T22:25:24,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/6aef765046184848935dd61868367f9b as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6aef765046184848935dd61868367f9b 2024-11-20T22:25:24,283 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6aef765046184848935dd61868367f9b, entries=150, sequenceid=115, filesize=11.7 K 2024-11-20T22:25:24,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/a880c0a993754332b3835be1cd80c709 as 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/a880c0a993754332b3835be1cd80c709
2024-11-20T22:25:24,290 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/a880c0a993754332b3835be1cd80c709, entries=150, sequenceid=115, filesize=11.7 K
2024-11-20T22:25:24,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/b6861bc5332a49a4a36accdda92149aa as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/b6861bc5332a49a4a36accdda92149aa
2024-11-20T22:25:24,304 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/b6861bc5332a49a4a36accdda92149aa, entries=150, sequenceid=115, filesize=11.7 K
2024-11-20T22:25:24,305 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 6f10e22f8d7346b15976f24fa4b38050 in 1067ms, sequenceid=115, compaction requested=false
2024-11-20T22:25:24,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050:
2024-11-20T22:25:24,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.
2024-11-20T22:25:24,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131
2024-11-20T22:25:24,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=131
2024-11-20T22:25:24,310 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130
2024-11-20T22:25:24,310 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5070 sec
2024-11-20T22:25:24,315 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 2.5130 sec
2024-11-20T22:25:24,532 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB
2024-11-20T22:25:24,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A
2024-11-20T22:25:24,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:25:24,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B
2024-11-20T22:25:24,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:25:24,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C
2024-11-20T22:25:24,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:25:24,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050
2024-11-20T22:25:24,558 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/ecd7560727ed474abbb245670d3e8e4f is 50, key is test_row_0/A:col10/1732141524531/Put/seqid=0
2024-11-20T22:25:24,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742305_1481 (size=14441)
2024-11-20T22:25:24,587 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/ecd7560727ed474abbb245670d3e8e4f
2024-11-20T22:25:24,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/72f815ac791b4771b1226bb9805703ed is 50, key is test_row_0/B:col10/1732141524531/Put/seqid=0
2024-11-20T22:25:24,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141584594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:24,612 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141584597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:24,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141584598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:24,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141584602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:24,626 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141584612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:24,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742306_1482 (size=12051) 2024-11-20T22:25:24,643 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/72f815ac791b4771b1226bb9805703ed 2024-11-20T22:25:24,688 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/24a53feed4de4081aa851ba2b36fd10f is 50, key is test_row_0/C:col10/1732141524531/Put/seqid=0 2024-11-20T22:25:24,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742307_1483 (size=12051) 2024-11-20T22:25:24,704 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/24a53feed4de4081aa851ba2b36fd10f 2024-11-20T22:25:24,716 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/ecd7560727ed474abbb245670d3e8e4f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/ecd7560727ed474abbb245670d3e8e4f 2024-11-20T22:25:24,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141584714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:24,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141584714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:24,729 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/ecd7560727ed474abbb245670d3e8e4f, entries=200, sequenceid=132, filesize=14.1 K 2024-11-20T22:25:24,731 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141584714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:24,731 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141584715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:24,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/72f815ac791b4771b1226bb9805703ed as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/72f815ac791b4771b1226bb9805703ed 2024-11-20T22:25:24,737 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/72f815ac791b4771b1226bb9805703ed, entries=150, sequenceid=132, filesize=11.8 K 2024-11-20T22:25:24,738 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/24a53feed4de4081aa851ba2b36fd10f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/24a53feed4de4081aa851ba2b36fd10f 2024-11-20T22:25:24,741 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141584729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:24,751 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/24a53feed4de4081aa851ba2b36fd10f, entries=150, sequenceid=132, filesize=11.8 K 2024-11-20T22:25:24,753 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 6f10e22f8d7346b15976f24fa4b38050 in 220ms, sequenceid=132, compaction requested=true 2024-11-20T22:25:24,754 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:24,754 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:24,755 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38546 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:24,755 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/A is initiating minor compaction (all files) 2024-11-20T22:25:24,755 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/A in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
2024-11-20T22:25:24,755 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/20af2dc8221e40ddb22a284d81f20e4c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6aef765046184848935dd61868367f9b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/ecd7560727ed474abbb245670d3e8e4f] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=37.6 K
2024-11-20T22:25:24,755 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 20af2dc8221e40ddb22a284d81f20e4c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732141521926
2024-11-20T22:25:24,756 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6aef765046184848935dd61868367f9b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732141522698
2024-11-20T22:25:24,756 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting ecd7560727ed474abbb245670d3e8e4f, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732141523382
2024-11-20T22:25:24,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:A, priority=-2147483648, current under compaction store size is 1
2024-11-20T22:25:24,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T22:25:24,761 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T22:25:24,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:B, priority=-2147483648, current under compaction store size is 2
2024-11-20T22:25:24,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T22:25:24,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:C, priority=-2147483648, current under compaction store size is 3
2024-11-20T22:25:24,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-20T22:25:24,762 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T22:25:24,762 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/B is initiating minor compaction (all files)
2024-11-20T22:25:24,763 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/B in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.
2024-11-20T22:25:24,763 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/89622cdcbd554d9ea719f9880a913082, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/a880c0a993754332b3835be1cd80c709, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/72f815ac791b4771b1226bb9805703ed] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=35.4 K
2024-11-20T22:25:24,763 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 89622cdcbd554d9ea719f9880a913082, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732141521926
2024-11-20T22:25:24,763 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting a880c0a993754332b3835be1cd80c709, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732141522698
2024-11-20T22:25:24,763 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 72f815ac791b4771b1226bb9805703ed, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732141523382
2024-11-20T22:25:24,781 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#A#compaction#399 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T22:25:24,781 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/a946dcc5b2d84cfbbe96bc1b9b58a4fb is 50, key is test_row_0/A:col10/1732141524531/Put/seqid=0
2024-11-20T22:25:24,794 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#B#compaction#400 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T22:25:24,794 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/b1295d2e9577427a9d392c395ba52e4a is 50, key is test_row_0/B:col10/1732141524531/Put/seqid=0
2024-11-20T22:25:24,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742308_1484 (size=12257)
2024-11-20T22:25:24,851 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/a946dcc5b2d84cfbbe96bc1b9b58a4fb as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/a946dcc5b2d84cfbbe96bc1b9b58a4fb
2024-11-20T22:25:24,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742309_1485 (size=12359)
2024-11-20T22:25:24,862 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/A of 6f10e22f8d7346b15976f24fa4b38050 into a946dcc5b2d84cfbbe96bc1b9b58a4fb(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T22:25:24,862 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050:
2024-11-20T22:25:24,862 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/A, priority=13, startTime=1732141524754; duration=0sec
2024-11-20T22:25:24,862 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-20T22:25:24,862 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:A
2024-11-20T22:25:24,862 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T22:25:24,864 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T22:25:24,864 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/C is initiating minor compaction (all files)
2024-11-20T22:25:24,864 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/C in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.
2024-11-20T22:25:24,864 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/8b6b7780637c4bdda5486f711f40a53d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/b6861bc5332a49a4a36accdda92149aa, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/24a53feed4de4081aa851ba2b36fd10f] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=35.4 K
2024-11-20T22:25:24,865 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b6b7780637c4bdda5486f711f40a53d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732141521926
2024-11-20T22:25:24,865 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6861bc5332a49a4a36accdda92149aa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732141522698
2024-11-20T22:25:24,865 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 24a53feed4de4081aa851ba2b36fd10f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732141523382
2024-11-20T22:25:24,866 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/b1295d2e9577427a9d392c395ba52e4a as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/b1295d2e9577427a9d392c395ba52e4a
2024-11-20T22:25:24,884 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/B of 6f10e22f8d7346b15976f24fa4b38050 into b1295d2e9577427a9d392c395ba52e4a(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T22:25:24,884 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:24,884 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/B, priority=13, startTime=1732141524761; duration=0sec 2024-11-20T22:25:24,884 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:24,884 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:B 2024-11-20T22:25:24,896 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#C#compaction#401 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:24,897 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/fa052f34817e476cb10c3390270377f7 is 50, key is test_row_0/C:col10/1732141524531/Put/seqid=0 2024-11-20T22:25:24,932 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T22:25:24,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:24,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:24,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:24,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:24,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:24,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:24,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:24,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742310_1486 (size=12359) 2024-11-20T22:25:24,949 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/fa052f34817e476cb10c3390270377f7 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/fa052f34817e476cb10c3390270377f7 2024-11-20T22:25:24,949 DEBUG [MemStoreFlusher.0 
{}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/30c131318c14439eb2095682f72441ee is 50, key is test_row_0/A:col10/1732141524608/Put/seqid=0 2024-11-20T22:25:24,958 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/C of 6f10e22f8d7346b15976f24fa4b38050 into fa052f34817e476cb10c3390270377f7(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:24,958 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:24,958 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/C, priority=13, startTime=1732141524761; duration=0sec 2024-11-20T22:25:24,958 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:24,958 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:C 2024-11-20T22:25:24,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141584955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:24,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141584962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:24,974 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141584964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:24,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141584965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:24,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141584966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:24,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742311_1487 (size=14541) 2024-11-20T22:25:24,996 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/30c131318c14439eb2095682f72441ee 2024-11-20T22:25:25,030 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/476500c13c4b4e73acde6c4410a2980f is 50, key is test_row_0/B:col10/1732141524608/Put/seqid=0 2024-11-20T22:25:25,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742312_1488 (size=12151) 2024-11-20T22:25:25,052 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/476500c13c4b4e73acde6c4410a2980f 2024-11-20T22:25:25,062 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/4d5e209cad1246259dda5ec5322b41ce is 50, key is test_row_0/C:col10/1732141524608/Put/seqid=0 2024-11-20T22:25:25,069 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141585067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141585068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141585076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141585076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141585076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742313_1489 (size=12151) 2024-11-20T22:25:25,101 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/4d5e209cad1246259dda5ec5322b41ce 2024-11-20T22:25:25,107 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/30c131318c14439eb2095682f72441ee as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/30c131318c14439eb2095682f72441ee 2024-11-20T22:25:25,116 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/30c131318c14439eb2095682f72441ee, entries=200, sequenceid=158, filesize=14.2 K 2024-11-20T22:25:25,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/476500c13c4b4e73acde6c4410a2980f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/476500c13c4b4e73acde6c4410a2980f 2024-11-20T22:25:25,122 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/476500c13c4b4e73acde6c4410a2980f, entries=150, sequenceid=158, filesize=11.9 K 2024-11-20T22:25:25,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/4d5e209cad1246259dda5ec5322b41ce as 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/4d5e209cad1246259dda5ec5322b41ce 2024-11-20T22:25:25,129 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/4d5e209cad1246259dda5ec5322b41ce, entries=150, sequenceid=158, filesize=11.9 K 2024-11-20T22:25:25,130 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 6f10e22f8d7346b15976f24fa4b38050 in 197ms, sequenceid=158, compaction requested=false 2024-11-20T22:25:25,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:25,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:25,273 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T22:25:25,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:25,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:25,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:25,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:25,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:25,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:25,288 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/8385af1a6970439cba58b3b87b9931a3 is 50, key is test_row_0/A:col10/1732141524961/Put/seqid=0 2024-11-20T22:25:25,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742314_1490 (size=14541) 2024-11-20T22:25:25,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141585327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141585327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141585333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,346 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141585334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141585340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141585441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141585441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141585446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141585447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141585456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141585653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141585656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,663 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141585656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,663 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141585656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,673 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141585667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,716 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/8385af1a6970439cba58b3b87b9931a3 2024-11-20T22:25:25,728 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/cb789950f7a94c689b6ab05af0d62149 is 50, key is test_row_0/B:col10/1732141524961/Put/seqid=0 2024-11-20T22:25:25,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742315_1491 (size=12151) 2024-11-20T22:25:25,771 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/cb789950f7a94c689b6ab05af0d62149 2024-11-20T22:25:25,804 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/d4d25fb7022f428194cf794934e503ba is 50, key is test_row_0/C:col10/1732141524961/Put/seqid=0 2024-11-20T22:25:25,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742316_1492 (size=12151) 2024-11-20T22:25:25,905 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T22:25:25,906 INFO [Thread-2042 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-20T22:25:25,909 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:25,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-11-20T22:25:25,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T22:25:25,915 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:25,916 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:25,916 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:25,965 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141585959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141585966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141585966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,977 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141585967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:25,984 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141585977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:26,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T22:25:26,067 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:26,069 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T22:25:26,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:26,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:26,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:26,070 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:26,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:26,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:26,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T22:25:26,224 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:26,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T22:25:26,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:26,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:26,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:26,225 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:26,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:26,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:26,235 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/d4d25fb7022f428194cf794934e503ba 2024-11-20T22:25:26,240 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/8385af1a6970439cba58b3b87b9931a3 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/8385af1a6970439cba58b3b87b9931a3 2024-11-20T22:25:26,249 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/8385af1a6970439cba58b3b87b9931a3, entries=200, sequenceid=173, filesize=14.2 K 2024-11-20T22:25:26,255 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/cb789950f7a94c689b6ab05af0d62149 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/cb789950f7a94c689b6ab05af0d62149 2024-11-20T22:25:26,258 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/cb789950f7a94c689b6ab05af0d62149, entries=150, 
sequenceid=173, filesize=11.9 K 2024-11-20T22:25:26,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/d4d25fb7022f428194cf794934e503ba as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/d4d25fb7022f428194cf794934e503ba 2024-11-20T22:25:26,262 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/d4d25fb7022f428194cf794934e503ba, entries=150, sequenceid=173, filesize=11.9 K 2024-11-20T22:25:26,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 6f10e22f8d7346b15976f24fa4b38050 in 990ms, sequenceid=173, compaction requested=true 2024-11-20T22:25:26,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:26,263 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:26,264 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41339 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:26,265 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/A is initiating minor compaction (all files) 2024-11-20T22:25:26,265 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/A in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
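The RegionTooBusyException records above and the flush that finally lands here are two views of the same limit: HRegion.checkResources rejects writes once the region's memstore passes its blocking size, which is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The 512.0 K figure in these records would be consistent with, for example, a 128 KB flush size and the default multiplier of 4; the log does not show the test's actual settings, so the sketch below is only an assumption about how such a limit could be configured, not the harness's real setup.

    // Hedged sketch only: the property names are real HBase settings,
    // the values are assumed to match the 512 K blocking limit in the log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreConf {
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches ~128 KB ...
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        // ... and block writes (RegionTooBusyException) at 4x that size, i.e. 512 KB.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
      }
    }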
2024-11-20T22:25:26,265 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/a946dcc5b2d84cfbbe96bc1b9b58a4fb, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/30c131318c14439eb2095682f72441ee, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/8385af1a6970439cba58b3b87b9931a3] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=40.4 K 2024-11-20T22:25:26,265 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting a946dcc5b2d84cfbbe96bc1b9b58a4fb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732141523382 2024-11-20T22:25:26,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:26,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:26,265 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30c131318c14439eb2095682f72441ee, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732141524591 2024-11-20T22:25:26,266 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8385af1a6970439cba58b3b87b9931a3, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732141524935 2024-11-20T22:25:26,266 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:26,267 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:26,268 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/B is initiating minor compaction (all files) 2024-11-20T22:25:26,268 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/B in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
2024-11-20T22:25:26,268 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/b1295d2e9577427a9d392c395ba52e4a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/476500c13c4b4e73acde6c4410a2980f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/cb789950f7a94c689b6ab05af0d62149] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=35.8 K 2024-11-20T22:25:26,268 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting b1295d2e9577427a9d392c395ba52e4a, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732141523382 2024-11-20T22:25:26,269 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 476500c13c4b4e73acde6c4410a2980f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732141524608 2024-11-20T22:25:26,269 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting cb789950f7a94c689b6ab05af0d62149, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732141524961 2024-11-20T22:25:26,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:26,276 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:26,276 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:26,276 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:26,284 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#A#compaction#408 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:26,286 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/720a03e28ef648e1bc034fd11fe1b75f is 50, key is test_row_0/A:col10/1732141524961/Put/seqid=0 2024-11-20T22:25:26,296 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#B#compaction#409 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:26,297 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/86a5a1e434c84e5cb07dc19ea3239704 is 50, key is test_row_0/B:col10/1732141524961/Put/seqid=0 2024-11-20T22:25:26,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742318_1494 (size=12561) 2024-11-20T22:25:26,336 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/86a5a1e434c84e5cb07dc19ea3239704 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/86a5a1e434c84e5cb07dc19ea3239704 2024-11-20T22:25:26,342 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/B of 6f10e22f8d7346b15976f24fa4b38050 into 86a5a1e434c84e5cb07dc19ea3239704(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:26,342 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:26,342 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/B, priority=13, startTime=1732141526266; duration=0sec 2024-11-20T22:25:26,342 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:26,342 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:B 2024-11-20T22:25:26,342 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:26,343 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:26,343 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/C is initiating minor compaction (all files) 2024-11-20T22:25:26,343 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/C in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
2024-11-20T22:25:26,344 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/fa052f34817e476cb10c3390270377f7, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/4d5e209cad1246259dda5ec5322b41ce, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/d4d25fb7022f428194cf794934e503ba] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=35.8 K 2024-11-20T22:25:26,345 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting fa052f34817e476cb10c3390270377f7, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732141523382 2024-11-20T22:25:26,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742317_1493 (size=12459) 2024-11-20T22:25:26,345 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d5e209cad1246259dda5ec5322b41ce, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732141524608 2024-11-20T22:25:26,345 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting d4d25fb7022f428194cf794934e503ba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732141524961 2024-11-20T22:25:26,355 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#C#compaction#410 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:26,355 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/0fbda6d3f6c149a8944c423860de4cc8 is 50, key is test_row_0/C:col10/1732141524961/Put/seqid=0 2024-11-20T22:25:26,379 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:26,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742319_1495 (size=12561) 2024-11-20T22:25:26,380 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T22:25:26,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
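The repeated FlushRegionCallable dispatches for pid=133 above trace back to a table flush issued through the Admin API; the earlier "client.HBaseAdmin$TableFuture ... Operation: FLUSH ... procId: 130 completed" record shows the same path for the previous procedure. Because the region reports "NOT flushing ... as already flushing", the callable fails with "Unable to complete flush" and the master re-dispatches it until the in-progress flush finishes. A minimal sketch of that client call, assuming a default connection and using the table name from the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          // Submits a FlushTableProcedure on the master and waits for it; the
          // per-region work is carried out by FlushRegionCallable on the
          // region server, as seen in the surrounding records.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }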
2024-11-20T22:25:26,381 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T22:25:26,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:26,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:26,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:26,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:26,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:26,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:26,400 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/0fbda6d3f6c149a8944c423860de4cc8 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0fbda6d3f6c149a8944c423860de4cc8 2024-11-20T22:25:26,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/72f4ad1ad1884396bb4397344779aeeb is 50, key is test_row_0/A:col10/1732141525322/Put/seqid=0 2024-11-20T22:25:26,420 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/C of 6f10e22f8d7346b15976f24fa4b38050 into 0fbda6d3f6c149a8944c423860de4cc8(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
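The rejected Mutate calls throughout this stretch are ordinary client puts against rows like test_row_0 in families A, B and C; when the region is over its blocking memstore size the server answers with RegionTooBusyException, which the HBase client treats as a retriable error and retries with backoff until the call's deadline (shown in the CallRunner records) expires. A hedged illustration of such a write, with assumed values rather than the test's actual code:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutRow {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // If the region is over its blocking limit, the put fails server-side with
          // RegionTooBusyException; the client retries until the RPC deadline passes.
          table.put(put);
        }
      }
    }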
2024-11-20T22:25:26,420 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:26,420 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/C, priority=13, startTime=1732141526276; duration=0sec 2024-11-20T22:25:26,420 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:26,420 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:C 2024-11-20T22:25:26,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742320_1496 (size=12151) 2024-11-20T22:25:26,457 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/72f4ad1ad1884396bb4397344779aeeb 2024-11-20T22:25:26,479 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:26,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:26,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/ac61ad1f43bb4306b36f8a63702081ef is 50, key is test_row_0/B:col10/1732141525322/Put/seqid=0 2024-11-20T22:25:26,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742321_1497 (size=12151) 2024-11-20T22:25:26,503 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/ac61ad1f43bb4306b36f8a63702081ef 2024-11-20T22:25:26,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141586498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:26,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141586506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:26,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141586507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:26,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141586508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:26,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T22:25:26,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/37fbdb7af9d645fca4be0a0deea235fb is 50, key is test_row_0/C:col10/1732141525322/Put/seqid=0 2024-11-20T22:25:26,525 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141586512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:26,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742322_1498 (size=12151) 2024-11-20T22:25:26,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141586614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:26,624 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141586620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:26,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141586620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:26,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141586620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:26,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141586626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:26,752 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/720a03e28ef648e1bc034fd11fe1b75f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/720a03e28ef648e1bc034fd11fe1b75f 2024-11-20T22:25:26,759 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/A of 6f10e22f8d7346b15976f24fa4b38050 into 720a03e28ef648e1bc034fd11fe1b75f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:26,759 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:26,759 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/A, priority=13, startTime=1732141526263; duration=0sec 2024-11-20T22:25:26,759 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:26,759 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:A 2024-11-20T22:25:26,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141586817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:26,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141586827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:26,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141586827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:26,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141586827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:26,840 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141586833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:26,946 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/37fbdb7af9d645fca4be0a0deea235fb 2024-11-20T22:25:26,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/72f4ad1ad1884396bb4397344779aeeb as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/72f4ad1ad1884396bb4397344779aeeb 2024-11-20T22:25:26,959 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/72f4ad1ad1884396bb4397344779aeeb, entries=150, sequenceid=196, filesize=11.9 K 2024-11-20T22:25:26,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/ac61ad1f43bb4306b36f8a63702081ef as 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/ac61ad1f43bb4306b36f8a63702081ef 2024-11-20T22:25:26,966 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/ac61ad1f43bb4306b36f8a63702081ef, entries=150, sequenceid=196, filesize=11.9 K 2024-11-20T22:25:26,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/37fbdb7af9d645fca4be0a0deea235fb as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/37fbdb7af9d645fca4be0a0deea235fb 2024-11-20T22:25:26,991 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/37fbdb7af9d645fca4be0a0deea235fb, entries=150, sequenceid=196, filesize=11.9 K 2024-11-20T22:25:26,991 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 6f10e22f8d7346b15976f24fa4b38050 in 611ms, sequenceid=196, compaction requested=false 2024-11-20T22:25:26,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:26,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
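The RegionTooBusyException records above show the server rejecting puts while the region's memstore is over its 512.0 K blocking limit; the flush that completes here (dataSize ~127.47 KB in 611ms, sequenceid=196) is what drains that backlog. Below is a minimal, illustrative Java sketch of a writer retrying on that exception. The table, family, qualifier, and backoff values are taken from or assumed for this log only, and the stock HBase client already retries this exception internally, so this is a sketch of the client-visible behavior, not the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);               // accepted once the memstore drops below its blocking limit
              return;
            } catch (RegionTooBusyException e) {
              // Same condition logged above: the region's memstore exceeds its blocking limit.
              Thread.sleep(100L * attempt); // simple linear backoff before retrying (illustrative value)
            }
          }
        }
      }
    }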
2024-11-20T22:25:26,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-11-20T22:25:26,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-11-20T22:25:27,007 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-20T22:25:27,007 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0820 sec 2024-11-20T22:25:27,009 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 1.0990 sec 2024-11-20T22:25:27,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T22:25:27,020 INFO [Thread-2042 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-20T22:25:27,023 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:27,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-20T22:25:27,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T22:25:27,027 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:27,027 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:27,027 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:27,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T22:25:27,135 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T22:25:27,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:27,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:27,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:27,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
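The pid=132/134 records above correspond to client-requested table flushes: HBaseAdmin reports "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed", and the master immediately stores a new FlushTableProcedure as pid=134 with a FlushRegionProcedure subprocedure. A hedged sketch of how such a flush is typically requested through the public Admin API, assuming a standard client Connection rather than the test harness used here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; in this log the equivalent
          // request surfaces as a FlushTableProcedure with FlushRegionProcedure subprocedures.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }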
2024-11-20T22:25:27,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:27,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:27,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:27,156 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/ca2d1b65414544759a12d80349385fea is 50, key is test_row_0/A:col10/1732141526511/Put/seqid=0 2024-11-20T22:25:27,179 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,180 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T22:25:27,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:27,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:27,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:27,180 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
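The repeated "Over memstore limit=512.0 K" figure is the region's blocking memstore size, i.e. the per-region flush size multiplied by the block multiplier checked in HRegion.checkResources. A small arithmetic sketch follows, with hypothetical settings chosen only because 128 K x 4 reproduces the 512 K limit seen in these records; the actual configuration used by this test is not visible in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical values: a 128 K flush size with the default multiplier of 4 yields
        // the 512 K blocking limit reported in the RegionTooBusyException messages above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("memstore blocking limit = " + blockingLimit + " bytes"); // 524288
      }
    }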
2024-11-20T22:25:27,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742323_1499 (size=14541) 2024-11-20T22:25:27,191 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/ca2d1b65414544759a12d80349385fea 2024-11-20T22:25:27,205 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/3eb67c122a55445b8774af2ed26a978b is 50, key is test_row_0/B:col10/1732141526511/Put/seqid=0 2024-11-20T22:25:27,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742324_1500 (size=12151) 2024-11-20T22:25:27,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141587173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141587182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,251 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141587240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141587240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141587240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T22:25:27,334 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T22:25:27,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:27,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:27,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:27,335 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141587341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141587345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,361 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141587353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,361 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141587353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141587353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,486 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,487 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T22:25:27,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:27,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:27,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:27,487 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:27,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141587555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141587558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141587564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141587564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141587569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T22:25:27,634 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/3eb67c122a55445b8774af2ed26a978b 2024-11-20T22:25:27,642 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,643 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T22:25:27,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:27,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:27,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:27,643 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,653 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/d6bb2eeaa5ec496888df661296101b41 is 50, key is test_row_0/C:col10/1732141526511/Put/seqid=0 2024-11-20T22:25:27,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742325_1501 (size=12151) 2024-11-20T22:25:27,709 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/d6bb2eeaa5ec496888df661296101b41 2024-11-20T22:25:27,713 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/ca2d1b65414544759a12d80349385fea as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/ca2d1b65414544759a12d80349385fea 2024-11-20T22:25:27,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/ca2d1b65414544759a12d80349385fea, entries=200, sequenceid=213, filesize=14.2 K 2024-11-20T22:25:27,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/3eb67c122a55445b8774af2ed26a978b as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/3eb67c122a55445b8774af2ed26a978b 2024-11-20T22:25:27,727 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/3eb67c122a55445b8774af2ed26a978b, entries=150, sequenceid=213, filesize=11.9 K 2024-11-20T22:25:27,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/d6bb2eeaa5ec496888df661296101b41 as 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/d6bb2eeaa5ec496888df661296101b41 2024-11-20T22:25:27,730 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/d6bb2eeaa5ec496888df661296101b41, entries=150, sequenceid=213, filesize=11.9 K 2024-11-20T22:25:27,731 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 6f10e22f8d7346b15976f24fa4b38050 in 596ms, sequenceid=213, compaction requested=true 2024-11-20T22:25:27,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:27,732 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:27,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:27,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:27,732 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:27,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:27,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:27,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:27,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:27,732 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39151 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:27,732 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/A is initiating minor compaction (all files) 2024-11-20T22:25:27,733 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/A in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
2024-11-20T22:25:27,733 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/720a03e28ef648e1bc034fd11fe1b75f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/72f4ad1ad1884396bb4397344779aeeb, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/ca2d1b65414544759a12d80349385fea] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=38.2 K 2024-11-20T22:25:27,733 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:27,733 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 720a03e28ef648e1bc034fd11fe1b75f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732141524961 2024-11-20T22:25:27,733 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/B is initiating minor compaction (all files) 2024-11-20T22:25:27,733 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/B in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
2024-11-20T22:25:27,733 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/86a5a1e434c84e5cb07dc19ea3239704, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/ac61ad1f43bb4306b36f8a63702081ef, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/3eb67c122a55445b8774af2ed26a978b] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=36.0 K 2024-11-20T22:25:27,733 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72f4ad1ad1884396bb4397344779aeeb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732141525322 2024-11-20T22:25:27,733 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 86a5a1e434c84e5cb07dc19ea3239704, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732141524961 2024-11-20T22:25:27,734 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca2d1b65414544759a12d80349385fea, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732141526494 2024-11-20T22:25:27,734 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting ac61ad1f43bb4306b36f8a63702081ef, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732141525322 2024-11-20T22:25:27,734 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 3eb67c122a55445b8774af2ed26a978b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732141526494 2024-11-20T22:25:27,741 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#A#compaction#417 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:27,741 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/e194aa39de1d4eeb837a7386faff706f is 50, key is test_row_0/A:col10/1732141526511/Put/seqid=0 2024-11-20T22:25:27,760 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#B#compaction#418 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:27,761 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/ec2136fb57fc4ce99f89621b45159164 is 50, key is test_row_0/B:col10/1732141526511/Put/seqid=0 2024-11-20T22:25:27,796 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,796 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T22:25:27,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:27,796 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T22:25:27,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:27,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:27,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:27,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:27,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:27,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:27,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742326_1502 (size=12561) 2024-11-20T22:25:27,822 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/e194aa39de1d4eeb837a7386faff706f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/e194aa39de1d4eeb837a7386faff706f 2024-11-20T22:25:27,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742327_1503 (size=12663) 2024-11-20T22:25:27,827 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/A of 6f10e22f8d7346b15976f24fa4b38050 into e194aa39de1d4eeb837a7386faff706f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:27,827 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:27,827 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/A, priority=13, startTime=1732141527731; duration=0sec 2024-11-20T22:25:27,827 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:27,827 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:A 2024-11-20T22:25:27,827 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:27,828 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:27,828 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/C is initiating minor compaction (all files) 2024-11-20T22:25:27,828 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/C in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
2024-11-20T22:25:27,828 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0fbda6d3f6c149a8944c423860de4cc8, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/37fbdb7af9d645fca4be0a0deea235fb, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/d6bb2eeaa5ec496888df661296101b41] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=36.0 K 2024-11-20T22:25:27,829 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0fbda6d3f6c149a8944c423860de4cc8, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732141524961 2024-11-20T22:25:27,830 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 37fbdb7af9d645fca4be0a0deea235fb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732141525322 2024-11-20T22:25:27,830 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting d6bb2eeaa5ec496888df661296101b41, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732141526494 2024-11-20T22:25:27,833 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/ec2136fb57fc4ce99f89621b45159164 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/ec2136fb57fc4ce99f89621b45159164 2024-11-20T22:25:27,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/ff8e04397b8c4b5a9d5dd38b1287d37c is 50, key is test_row_0/A:col10/1732141527189/Put/seqid=0 2024-11-20T22:25:27,847 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/B of 6f10e22f8d7346b15976f24fa4b38050 into ec2136fb57fc4ce99f89621b45159164(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:27,848 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:27,848 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/B, priority=13, startTime=1732141527732; duration=0sec 2024-11-20T22:25:27,848 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:27,848 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:B 2024-11-20T22:25:27,870 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#C#compaction#420 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:27,870 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/595d8c03a3aa4787960185f0f629701a is 50, key is test_row_0/C:col10/1732141526511/Put/seqid=0 2024-11-20T22:25:27,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:27,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:27,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742328_1504 (size=12151) 2024-11-20T22:25:27,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141587899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,916 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141587900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,916 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141587901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141587915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141587915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:27,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742329_1505 (size=12663) 2024-11-20T22:25:27,933 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/595d8c03a3aa4787960185f0f629701a as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/595d8c03a3aa4787960185f0f629701a 2024-11-20T22:25:27,937 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/C of 6f10e22f8d7346b15976f24fa4b38050 into 595d8c03a3aa4787960185f0f629701a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
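The RegionTooBusyException entries above and below are the region server's write back-pressure: HRegion.checkResources rejects a put once the region's memstore exceeds its blocking size, which is the configured flush size (hbase.hregion.memstore.flush.size) multiplied by hbase.hregion.memstore.block.multiplier; the 512.0 K limit reported here presumably comes from the deliberately small flush size this test configures. Clients are expected to back off until the in-flight flush drains the memstore. A minimal client-side sketch of that handling follows; the table, row, column and retry policy are illustrative assumptions rather than values taken from the test harness, and in practice the HBase client already performs this kind of retry internally before surfacing the exception (possibly wrapped) to the caller.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Family "A" and qualifier "col10" mirror the keys visible in the log; the value is a placeholder.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      int attempt = 0;
      while (true) {
        try {
          table.put(put); // rejected while the region's memstore is over its blocking size
          break;
        } catch (RegionTooBusyException busy) {
          if (++attempt > 5) {
            throw busy; // give up after a handful of attempts (illustrative limit)
          }
          Thread.sleep(200L * attempt); // simple linear backoff while the flush catches up
        }
      }
    }
  }
}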
2024-11-20T22:25:27,937 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:27,937 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/C, priority=13, startTime=1732141527732; duration=0sec 2024-11-20T22:25:27,937 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:27,937 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:C 2024-11-20T22:25:28,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:28,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141588017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:28,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:28,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141588017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:28,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:28,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141588017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:28,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:28,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141588029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:28,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:28,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141588029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:28,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T22:25:28,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:28,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141588226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:28,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:28,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141588226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:28,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:28,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141588229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:28,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:28,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141588235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:28,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:28,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141588237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:28,293 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/ff8e04397b8c4b5a9d5dd38b1287d37c 2024-11-20T22:25:28,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/37996cfa94fa42799bca2e71e28c6058 is 50, key is test_row_0/B:col10/1732141527189/Put/seqid=0 2024-11-20T22:25:28,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742330_1506 (size=12151) 2024-11-20T22:25:28,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:28,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141588538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:28,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:28,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141588539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:28,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:28,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141588539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:28,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:28,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141588546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:28,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:28,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141588546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:28,747 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/37996cfa94fa42799bca2e71e28c6058 2024-11-20T22:25:28,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/a9574a5dd95946ed8b10eab78eac351b is 50, key is test_row_0/C:col10/1732141527189/Put/seqid=0 2024-11-20T22:25:28,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742331_1507 (size=12151) 2024-11-20T22:25:29,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:29,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141589043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:29,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:29,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141589043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:29,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:29,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141589045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:29,066 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:29,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141589060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:29,068 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:29,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141589064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:29,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T22:25:29,210 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/a9574a5dd95946ed8b10eab78eac351b 2024-11-20T22:25:29,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/ff8e04397b8c4b5a9d5dd38b1287d37c as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/ff8e04397b8c4b5a9d5dd38b1287d37c 2024-11-20T22:25:29,227 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/ff8e04397b8c4b5a9d5dd38b1287d37c, entries=150, sequenceid=235, filesize=11.9 K 2024-11-20T22:25:29,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/37996cfa94fa42799bca2e71e28c6058 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/37996cfa94fa42799bca2e71e28c6058 2024-11-20T22:25:29,231 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/37996cfa94fa42799bca2e71e28c6058, entries=150, sequenceid=235, filesize=11.9 K 2024-11-20T22:25:29,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/a9574a5dd95946ed8b10eab78eac351b as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/a9574a5dd95946ed8b10eab78eac351b 2024-11-20T22:25:29,238 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/a9574a5dd95946ed8b10eab78eac351b, entries=150, sequenceid=235, filesize=11.9 K 2024-11-20T22:25:29,242 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 6f10e22f8d7346b15976f24fa4b38050 in 1446ms, sequenceid=235, compaction requested=false 2024-11-20T22:25:29,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:29,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
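At this point the flush driven by procedure pid=135 has completed: the three temporary files were committed into the A, B and C stores (entries=150, sequenceid=235, roughly 11.9 K each) and the region's flush status journal is closed, after which the master marks FlushRegionProcedure and its parent FlushTableProcedure as finished just below. A flush like this can also be requested explicitly through the Admin API; the sketch below shows that call, with the table name taken from the log and everything else (configuration, connection handling) left at defaults. Whether pid=134 was in fact started by such a client call or by the test harness itself is not visible in this excerpt.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the cluster to flush every memstore of the table. On the region server this
      // shows up as DefaultStoreFlusher writing .tmp files and HRegionFileSystem committing
      // them into the A/B/C stores, as logged above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}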
2024-11-20T22:25:29,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-20T22:25:29,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-20T22:25:29,244 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-20T22:25:29,245 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2160 sec 2024-11-20T22:25:29,246 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 2.2220 sec 2024-11-20T22:25:30,058 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T22:25:30,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:30,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:30,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:30,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:30,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:30,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:30,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:30,076 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/6376d112d727411f92029fe5078f1ff5 is 50, key is test_row_0/A:col10/1732141530056/Put/seqid=0 2024-11-20T22:25:30,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742332_1508 (size=14541) 2024-11-20T22:25:30,107 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/6376d112d727411f92029fe5078f1ff5 2024-11-20T22:25:30,144 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/dcb82d8c320d4c4aa8000e1094c7e700 is 50, key is test_row_0/B:col10/1732141530056/Put/seqid=0 2024-11-20T22:25:30,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141590140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:30,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141590140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:30,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141590141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:30,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141590148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:30,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141590151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:30,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742333_1509 (size=12151) 2024-11-20T22:25:30,182 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/dcb82d8c320d4c4aa8000e1094c7e700 2024-11-20T22:25:30,193 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/5b6a0e211fde43cbb9e5dc9b233f8435 is 50, key is test_row_0/C:col10/1732141530056/Put/seqid=0 2024-11-20T22:25:30,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742334_1510 (size=12151) 2024-11-20T22:25:30,229 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/5b6a0e211fde43cbb9e5dc9b233f8435 2024-11-20T22:25:30,241 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/6376d112d727411f92029fe5078f1ff5 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6376d112d727411f92029fe5078f1ff5 2024-11-20T22:25:30,262 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6376d112d727411f92029fe5078f1ff5, entries=200, sequenceid=253, filesize=14.2 K 2024-11-20T22:25:30,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141590257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:30,265 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/dcb82d8c320d4c4aa8000e1094c7e700 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/dcb82d8c320d4c4aa8000e1094c7e700 2024-11-20T22:25:30,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141590259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:30,273 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/dcb82d8c320d4c4aa8000e1094c7e700, entries=150, sequenceid=253, filesize=11.9 K 2024-11-20T22:25:30,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/5b6a0e211fde43cbb9e5dc9b233f8435 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/5b6a0e211fde43cbb9e5dc9b233f8435 2024-11-20T22:25:30,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141590264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:30,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141590265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:30,284 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/5b6a0e211fde43cbb9e5dc9b233f8435, entries=150, sequenceid=253, filesize=11.9 K 2024-11-20T22:25:30,285 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 6f10e22f8d7346b15976f24fa4b38050 in 227ms, sequenceid=253, compaction requested=true 2024-11-20T22:25:30,285 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:30,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:30,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:30,286 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:30,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:30,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:30,286 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:30,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:30,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:30,287 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:30,287 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39253 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:30,287 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/B is initiating minor compaction (all files) 2024-11-20T22:25:30,287 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/A is initiating minor compaction (all files) 2024-11-20T22:25:30,287 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/B in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:30,287 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/ec2136fb57fc4ce99f89621b45159164, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/37996cfa94fa42799bca2e71e28c6058, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/dcb82d8c320d4c4aa8000e1094c7e700] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=36.1 K 2024-11-20T22:25:30,287 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/A in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
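
The repeated RegionTooBusyException warnings above come from HRegion.checkResources(), which rejects writes once a region's memstore passes its blocking threshold. As a hedged aside: that threshold is normally hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so the "Over memstore limit=512.0 K" figure in this run presumably reflects a deliberately small flush size in the test configuration rather than the production default of 128 MB. The sketch below only illustrates the two settings involved; the concrete values are assumptions for illustration, not taken from this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
    public static void main(String[] args) {
        // Start from the standard HBase configuration on the classpath.
        Configuration conf = HBaseConfiguration.create();

        // Per-region flush threshold (bytes). 128 KB here is purely illustrative;
        // a small value like this makes flushes and write blocking easy to provoke.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);

        // Writes are rejected with RegionTooBusyException once the memstore exceeds
        // flush.size * block.multiplier, e.g. 128 KB * 4 = 512 KB as in the log above.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        System.out.println("Blocking limit (bytes): "
            + conf.getLong("hbase.hregion.memstore.flush.size", 0)
              * conf.getInt("hbase.hregion.memstore.block.multiplier", 4));
    }
}
```
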
2024-11-20T22:25:30,287 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/e194aa39de1d4eeb837a7386faff706f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/ff8e04397b8c4b5a9d5dd38b1287d37c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6376d112d727411f92029fe5078f1ff5] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=38.3 K 2024-11-20T22:25:30,287 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting ec2136fb57fc4ce99f89621b45159164, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732141526494 2024-11-20T22:25:30,288 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting e194aa39de1d4eeb837a7386faff706f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732141526494 2024-11-20T22:25:30,288 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 37996cfa94fa42799bca2e71e28c6058, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732141527168 2024-11-20T22:25:30,290 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff8e04397b8c4b5a9d5dd38b1287d37c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732141527168 2024-11-20T22:25:30,291 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting dcb82d8c320d4c4aa8000e1094c7e700, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732141527899 2024-11-20T22:25:30,291 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6376d112d727411f92029fe5078f1ff5, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732141527899 2024-11-20T22:25:30,294 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T22:25:30,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:30,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:30,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:30,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:30,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:30,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:30,294 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:30,320 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/46cce630536345bcb4833eb6b0210090 is 50, key is test_row_0/A:col10/1732141530141/Put/seqid=0 2024-11-20T22:25:30,324 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#B#compaction#427 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:30,325 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/8b657c38599e4fc8a5f9ce2d57dea0f8 is 50, key is test_row_0/B:col10/1732141530056/Put/seqid=0 2024-11-20T22:25:30,326 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#A#compaction#428 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:30,336 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/668b37cf8cdf41ce86b96a30de413bd8 is 50, key is test_row_0/A:col10/1732141530056/Put/seqid=0 2024-11-20T22:25:30,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742335_1511 (size=14741) 2024-11-20T22:25:30,352 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/46cce630536345bcb4833eb6b0210090 2024-11-20T22:25:30,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742337_1513 (size=12663) 2024-11-20T22:25:30,402 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/47e785e703784534ada512067c39f6bb is 50, key is test_row_0/B:col10/1732141530141/Put/seqid=0 2024-11-20T22:25:30,403 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/668b37cf8cdf41ce86b96a30de413bd8 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/668b37cf8cdf41ce86b96a30de413bd8 2024-11-20T22:25:30,408 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/A of 6f10e22f8d7346b15976f24fa4b38050 into 668b37cf8cdf41ce86b96a30de413bd8(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:30,408 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:30,408 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/A, priority=13, startTime=1732141530286; duration=0sec 2024-11-20T22:25:30,413 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:30,413 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:A 2024-11-20T22:25:30,413 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:30,419 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:30,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742336_1512 (size=12765) 2024-11-20T22:25:30,424 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/C is initiating minor compaction (all files) 2024-11-20T22:25:30,424 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/C in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
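
The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" lines above come from ExploringCompactionPolicy. Roughly speaking, a candidate set is "in ratio" when no single file is larger than the combined size of the other files times hbase.hstore.compaction.ratio (default 1.2). The sketch below is a simplified illustration of that check, not the code path HBase itself runs; the file sizes are chosen only so that they add up to the 39,253 bytes reported for store A.

```java
import java.util.Arrays;
import java.util.List;

public class CompactionRatioSketch {
    /**
     * Simplified version of the "in ratio" test referenced in the log: every file
     * in the candidate set must be no larger than the combined size of the other
     * files multiplied by the compaction ratio. Illustrative only.
     */
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Illustrative sizes summing to the 39,253 bytes reported for store A.
        List<Long> sizes = Arrays.asList(12_561L, 12_151L, 14_541L);
        System.out.println(filesInRatio(sizes, 1.2)); // true -> all three files compact together
    }
}
```
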
2024-11-20T22:25:30,426 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/595d8c03a3aa4787960185f0f629701a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/a9574a5dd95946ed8b10eab78eac351b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/5b6a0e211fde43cbb9e5dc9b233f8435] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=36.1 K 2024-11-20T22:25:30,431 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/8b657c38599e4fc8a5f9ce2d57dea0f8 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/8b657c38599e4fc8a5f9ce2d57dea0f8 2024-11-20T22:25:30,431 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 595d8c03a3aa4787960185f0f629701a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732141526494 2024-11-20T22:25:30,433 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting a9574a5dd95946ed8b10eab78eac351b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732141527168 2024-11-20T22:25:30,434 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b6a0e211fde43cbb9e5dc9b233f8435, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732141527899 2024-11-20T22:25:30,441 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/B of 6f10e22f8d7346b15976f24fa4b38050 into 8b657c38599e4fc8a5f9ce2d57dea0f8(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
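
Each RegionTooBusyException in this stretch is shipped back to the caller as a retriable failure (the DEBUG ipc.CallRunner lines show the exception attached to the response). The HBase client already retries such failures internally up to hbase.client.retries.number times; the sketch below only makes that pattern explicit for a single Put, using the table, row, and family names visible in the log. Treat it as an illustration under those assumptions, not as required application code.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {

            // Row key, family, and qualifier mirror the test data in the log
            // (row "test_row_0", families A/B/C, qualifier "col10").
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            int attempts = 0;
            while (true) {
                try {
                    table.put(put);
                    break;
                } catch (IOException e) {
                    // A busy region may surface as RegionTooBusyException or wrapped in a
                    // retries-exhausted exception, depending on client settings; either way
                    // the reasonable reaction is to back off and retry a bounded number of times.
                    if (++attempts >= 5) {
                        throw e;
                    }
                    Thread.sleep(100L * attempts); // simple linear back-off
                }
            }
        }
    }
}
```
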
2024-11-20T22:25:30,441 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:30,441 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/B, priority=13, startTime=1732141530286; duration=0sec 2024-11-20T22:25:30,441 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:30,441 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:B 2024-11-20T22:25:30,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742338_1514 (size=12301) 2024-11-20T22:25:30,453 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/47e785e703784534ada512067c39f6bb 2024-11-20T22:25:30,453 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#C#compaction#430 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:30,454 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/271d5d3651cc49368b039c1761d8034d is 50, key is test_row_0/C:col10/1732141530056/Put/seqid=0 2024-11-20T22:25:30,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141590475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:30,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141590477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:30,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141590478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:30,491 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141590482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:30,492 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141590483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:30,492 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/43709d321f1f49b69faf1b1993b37517 is 50, key is test_row_0/C:col10/1732141530141/Put/seqid=0 2024-11-20T22:25:30,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742339_1515 (size=12765) 2024-11-20T22:25:30,526 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/271d5d3651cc49368b039c1761d8034d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/271d5d3651cc49368b039c1761d8034d 2024-11-20T22:25:30,531 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/C of 6f10e22f8d7346b15976f24fa4b38050 into 271d5d3651cc49368b039c1761d8034d(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
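
The flush activity in this stretch is driven both by memstore pressure (MemStoreFlusher.0) and by explicit requests: FlushTableProcedure pid=134 finishes at the top of this section and a new one, pid=136, is stored near the end. Such a request is typically issued through the Admin API; the sketch below assumes a reachable cluster and reuses the table name from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Asks the master to flush every region of the table; in this log the
            // corresponding master-side work shows up as a FlushTableProcedure, and
            // the client call returns once that procedure reports completion.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```
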
2024-11-20T22:25:30,531 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:30,531 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/C, priority=13, startTime=1732141530286; duration=0sec 2024-11-20T22:25:30,531 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:30,531 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:C 2024-11-20T22:25:30,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742340_1516 (size=12301) 2024-11-20T22:25:30,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141590580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:30,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141590580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:30,786 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141590780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:30,797 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141590789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:30,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141590792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:30,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141590793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:30,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141590795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:30,955 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/43709d321f1f49b69faf1b1993b37517 2024-11-20T22:25:30,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/46cce630536345bcb4833eb6b0210090 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/46cce630536345bcb4833eb6b0210090 2024-11-20T22:25:30,972 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/46cce630536345bcb4833eb6b0210090, entries=200, sequenceid=276, filesize=14.4 K 2024-11-20T22:25:30,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/47e785e703784534ada512067c39f6bb as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/47e785e703784534ada512067c39f6bb 2024-11-20T22:25:30,978 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/47e785e703784534ada512067c39f6bb, entries=150, sequenceid=276, filesize=12.0 K 2024-11-20T22:25:30,978 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/43709d321f1f49b69faf1b1993b37517 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/43709d321f1f49b69faf1b1993b37517 2024-11-20T22:25:30,988 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/43709d321f1f49b69faf1b1993b37517, entries=150, sequenceid=276, filesize=12.0 K 2024-11-20T22:25:30,991 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 6f10e22f8d7346b15976f24fa4b38050 in 698ms, sequenceid=276, compaction requested=false 2024-11-20T22:25:30,991 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:31,105 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T22:25:31,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:31,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:31,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:31,107 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:31,107 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:31,107 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:31,107 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:31,125 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/6d61e069e689497c8ce0dc7ea0978009 is 50, key is test_row_0/A:col10/1732141531105/Put/seqid=0 2024-11-20T22:25:31,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T22:25:31,134 INFO [Thread-2042 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-20T22:25:31,145 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:31,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-20T22:25:31,146 INFO [PEWorker-4 {}] 
procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:31,148 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:31,148 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:31,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T22:25:31,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742341_1517 (size=17181) 2024-11-20T22:25:31,163 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/6d61e069e689497c8ce0dc7ea0978009 2024-11-20T22:25:31,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:31,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141591179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:31,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:31,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141591180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:31,201 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/1ac9e422845b44a5b5f5a860b9787785 is 50, key is test_row_0/B:col10/1732141531105/Put/seqid=0 2024-11-20T22:25:31,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742342_1518 (size=12301) 2024-11-20T22:25:31,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T22:25:31,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:31,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141591287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:31,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:31,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141591287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:31,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:31,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141591289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:31,300 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:31,301 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T22:25:31,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:31,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:31,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:31,301 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:31,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:31,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:31,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:31,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141591301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:31,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:31,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141591302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:31,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T22:25:31,453 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:31,454 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T22:25:31,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:31,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:31,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:31,454 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
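[Editorial note on the repeated RegionTooBusyException "Over memstore limit=512.0 K" entries above: HRegion.checkResources rejects writes once a region's memstore passes its blocking limit, which is the memstore flush size multiplied by the block multiplier. Below is a minimal Java sketch of that relationship; the property names are the standard HBase ones, but the 128 KiB flush size is only an assumption about how this test lowers the limit (the stock default is 128 MiB), and the class name is hypothetical.]

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed test-style override: a tiny flush size so flushes and write-blocking
        // trigger quickly. The real default flush size is 128 MiB.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4); // 4 is the usual default

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // 128 KiB * 4 = 512 KiB, matching "Over memstore limit=512.0 K" in the log above.
        System.out.println("blocking memstore size = " + (flushSize * multiplier) + " bytes");
    }
}
```

[Until a flush brings the memstore back under that limit, callers keep seeing RegionTooBusyException; the stock client normally treats it as retryable and backs off rather than failing the mutation outright, which is why the same connections reappear above with new callIds and later deadlines.]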
2024-11-20T22:25:31,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:31,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:31,502 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:31,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141591492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:31,502 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:31,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141591492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:31,605 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:31,606 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T22:25:31,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:31,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:31,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:31,606 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
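[Editorial note on the pid=137 failures ("Unable to complete flush ... as already flushing"): the region server refuses the remote flush procedure while MemStoreFlusher.0 is still writing the same region, and the master keeps re-dispatching the FlushRegionCallable until it is accepted, as happens further down at 22:25:31,763. For reference, a minimal, hypothetical sketch of the client side that drives this, assuming the standard HBase 2.x Admin API; the class name is invented, the table name is taken from the log.]

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Submits a flush request for the table on the master (the FlushTableProcedure /
            // FlushRegionProcedure pair seen as pid=136/137 in the log) and waits for it to
            // complete; transient "already flushing" rejections are retried on the server side.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```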
2024-11-20T22:25:31,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:31,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:31,629 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/1ac9e422845b44a5b5f5a860b9787785 2024-11-20T22:25:31,645 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/8bcbb471ca7441d699c63a0ff01ac331 is 50, key is test_row_0/C:col10/1732141531105/Put/seqid=0 2024-11-20T22:25:31,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742343_1519 (size=12301) 2024-11-20T22:25:31,672 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/8bcbb471ca7441d699c63a0ff01ac331 2024-11-20T22:25:31,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/6d61e069e689497c8ce0dc7ea0978009 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6d61e069e689497c8ce0dc7ea0978009 2024-11-20T22:25:31,686 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6d61e069e689497c8ce0dc7ea0978009, entries=250, sequenceid=294, filesize=16.8 K 2024-11-20T22:25:31,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/1ac9e422845b44a5b5f5a860b9787785 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/1ac9e422845b44a5b5f5a860b9787785 2024-11-20T22:25:31,714 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/1ac9e422845b44a5b5f5a860b9787785, entries=150, sequenceid=294, filesize=12.0 K 2024-11-20T22:25:31,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/8bcbb471ca7441d699c63a0ff01ac331 as 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/8bcbb471ca7441d699c63a0ff01ac331 2024-11-20T22:25:31,727 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/8bcbb471ca7441d699c63a0ff01ac331, entries=150, sequenceid=294, filesize=12.0 K 2024-11-20T22:25:31,731 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 6f10e22f8d7346b15976f24fa4b38050 in 626ms, sequenceid=294, compaction requested=true 2024-11-20T22:25:31,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:31,732 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:31,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:31,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:31,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:31,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:31,733 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:31,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:31,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:31,738 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 44585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:31,738 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:31,738 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/B is initiating minor compaction (all files) 2024-11-20T22:25:31,738 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/A is initiating minor compaction (all files) 2024-11-20T22:25:31,738 INFO 
[RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/A in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:31,738 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/B in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:31,738 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/8b657c38599e4fc8a5f9ce2d57dea0f8, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/47e785e703784534ada512067c39f6bb, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/1ac9e422845b44a5b5f5a860b9787785] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=36.5 K 2024-11-20T22:25:31,738 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/668b37cf8cdf41ce86b96a30de413bd8, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/46cce630536345bcb4833eb6b0210090, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6d61e069e689497c8ce0dc7ea0978009] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=43.5 K 2024-11-20T22:25:31,739 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b657c38599e4fc8a5f9ce2d57dea0f8, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732141527899 2024-11-20T22:25:31,739 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 668b37cf8cdf41ce86b96a30de413bd8, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732141527899 2024-11-20T22:25:31,739 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46cce630536345bcb4833eb6b0210090, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732141530139 2024-11-20T22:25:31,740 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 47e785e703784534ada512067c39f6bb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732141530140 2024-11-20T22:25:31,741 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d61e069e689497c8ce0dc7ea0978009, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732141530410 2024-11-20T22:25:31,746 DEBUG 
[RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ac9e422845b44a5b5f5a860b9787785, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732141530476 2024-11-20T22:25:31,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T22:25:31,762 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:31,763 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T22:25:31,764 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#A#compaction#435 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:31,765 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/7f43ac5dee324cad8544cb79f2cd857d is 50, key is test_row_0/A:col10/1732141531105/Put/seqid=0 2024-11-20T22:25:31,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:31,767 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T22:25:31,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:31,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:31,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:31,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:31,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:31,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:31,770 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#B#compaction#436 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:31,771 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/baf5f7850bef4fb3af519d2d81d83d74 is 50, key is test_row_0/B:col10/1732141531105/Put/seqid=0 2024-11-20T22:25:31,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742344_1520 (size=12915) 2024-11-20T22:25:31,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/14a15e5c8a384ae09e6f9ceb6a771da3 is 50, key is test_row_0/A:col10/1732141531158/Put/seqid=0 2024-11-20T22:25:31,812 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/7f43ac5dee324cad8544cb79f2cd857d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/7f43ac5dee324cad8544cb79f2cd857d 2024-11-20T22:25:31,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:31,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:31,823 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/A of 6f10e22f8d7346b15976f24fa4b38050 into 7f43ac5dee324cad8544cb79f2cd857d(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:31,823 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:31,823 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/A, priority=13, startTime=1732141531731; duration=0sec 2024-11-20T22:25:31,823 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:31,823 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:A 2024-11-20T22:25:31,824 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:31,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742345_1521 (size=13017) 2024-11-20T22:25:31,825 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:31,825 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/C is initiating minor compaction (all files) 2024-11-20T22:25:31,825 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/C in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
2024-11-20T22:25:31,826 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/271d5d3651cc49368b039c1761d8034d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/43709d321f1f49b69faf1b1993b37517, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/8bcbb471ca7441d699c63a0ff01ac331] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=36.5 K 2024-11-20T22:25:31,826 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 271d5d3651cc49368b039c1761d8034d, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732141527899 2024-11-20T22:25:31,826 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43709d321f1f49b69faf1b1993b37517, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732141530140 2024-11-20T22:25:31,826 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8bcbb471ca7441d699c63a0ff01ac331, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732141530476 2024-11-20T22:25:31,834 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/baf5f7850bef4fb3af519d2d81d83d74 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/baf5f7850bef4fb3af519d2d81d83d74 2024-11-20T22:25:31,840 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/B of 6f10e22f8d7346b15976f24fa4b38050 into baf5f7850bef4fb3af519d2d81d83d74(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:31,840 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:31,840 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/B, priority=13, startTime=1732141531733; duration=0sec 2024-11-20T22:25:31,840 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:31,840 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:B 2024-11-20T22:25:31,855 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#C#compaction#438 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:31,856 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/bf23b929857343a5a9236b6d60e7facf is 50, key is test_row_0/C:col10/1732141531105/Put/seqid=0 2024-11-20T22:25:31,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742346_1522 (size=12301) 2024-11-20T22:25:31,857 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/14a15e5c8a384ae09e6f9ceb6a771da3 2024-11-20T22:25:31,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/3f7d7ed385f74190a56fa9d846274b79 is 50, key is test_row_0/B:col10/1732141531158/Put/seqid=0 2024-11-20T22:25:31,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:31,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141591890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:31,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742347_1523 (size=13017) 2024-11-20T22:25:31,918 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:31,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141591908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:31,924 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/bf23b929857343a5a9236b6d60e7facf as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/bf23b929857343a5a9236b6d60e7facf 2024-11-20T22:25:31,928 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/C of 6f10e22f8d7346b15976f24fa4b38050 into bf23b929857343a5a9236b6d60e7facf(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:31,928 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:31,928 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/C, priority=13, startTime=1732141531733; duration=0sec 2024-11-20T22:25:31,928 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:31,928 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:C 2024-11-20T22:25:31,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742348_1524 (size=12301) 2024-11-20T22:25:32,014 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:32,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141592009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:32,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:32,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141592024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:32,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:32,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141592216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:32,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:32,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141592229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:32,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T22:25:32,304 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:32,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141592300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:32,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:32,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141592313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:32,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:32,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141592313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:32,350 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/3f7d7ed385f74190a56fa9d846274b79 2024-11-20T22:25:32,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/139e1b645f1f41b69f863990ef31c40f is 50, key is test_row_0/C:col10/1732141531158/Put/seqid=0 2024-11-20T22:25:32,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742349_1525 (size=12301) 2024-11-20T22:25:32,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:32,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141592520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:32,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:32,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141592537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:32,773 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/139e1b645f1f41b69f863990ef31c40f 2024-11-20T22:25:32,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/14a15e5c8a384ae09e6f9ceb6a771da3 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/14a15e5c8a384ae09e6f9ceb6a771da3 2024-11-20T22:25:32,786 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/14a15e5c8a384ae09e6f9ceb6a771da3, entries=150, sequenceid=315, filesize=12.0 K 2024-11-20T22:25:32,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/3f7d7ed385f74190a56fa9d846274b79 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/3f7d7ed385f74190a56fa9d846274b79 2024-11-20T22:25:32,790 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/3f7d7ed385f74190a56fa9d846274b79, entries=150, sequenceid=315, filesize=12.0 K 2024-11-20T22:25:32,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/139e1b645f1f41b69f863990ef31c40f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/139e1b645f1f41b69f863990ef31c40f 2024-11-20T22:25:32,794 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/139e1b645f1f41b69f863990ef31c40f, entries=150, sequenceid=315, filesize=12.0 K 2024-11-20T22:25:32,795 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 6f10e22f8d7346b15976f24fa4b38050 in 1028ms, sequenceid=315, compaction requested=false 2024-11-20T22:25:32,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:32,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:32,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-20T22:25:32,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-20T22:25:32,797 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-20T22:25:32,797 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6480 sec 2024-11-20T22:25:32,798 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.6520 sec 2024-11-20T22:25:33,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:33,029 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T22:25:33,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:33,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:33,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:33,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:33,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:33,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:33,042 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/bfe1c16450524718a2ba3387ac84ad90 is 50, key is test_row_0/A:col10/1732141533028/Put/seqid=0 2024-11-20T22:25:33,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742350_1526 (size=14741) 2024-11-20T22:25:33,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:33,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141593087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:33,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:33,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141593087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:33,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:33,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141593203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:33,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:33,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141593203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:33,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T22:25:33,253 INFO [Thread-2042 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-20T22:25:33,253 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:33,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-20T22:25:33,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T22:25:33,255 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:33,255 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:33,255 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:33,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T22:25:33,409 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:33,409 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T22:25:33,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:33,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:33,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:33,409 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:33,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:33,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:33,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:33,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141593412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:33,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:33,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141593414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:33,475 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/bfe1c16450524718a2ba3387ac84ad90 2024-11-20T22:25:33,481 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/c27a7c54395943fa9b66bd70b92dd0d5 is 50, key is test_row_0/B:col10/1732141533028/Put/seqid=0 2024-11-20T22:25:33,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742351_1527 (size=12301) 2024-11-20T22:25:33,485 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/c27a7c54395943fa9b66bd70b92dd0d5 2024-11-20T22:25:33,489 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/c770860fc8e74dfaaaa51edb91a3bdb3 is 50, key is test_row_0/C:col10/1732141533028/Put/seqid=0 2024-11-20T22:25:33,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742352_1528 (size=12301) 2024-11-20T22:25:33,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T22:25:33,561 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:33,561 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T22:25:33,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
2024-11-20T22:25:33,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:33,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:33,562 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:33,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
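The pid=139 failures above repeat because the region server refuses a master-requested flush while another flush of the same region is still in progress, and incoming mutations are meanwhile rejected with RegionTooBusyException once the memstore passes its blocking limit (512.0 K here). The sketch below shows how that blocking limit is typically derived; only the configuration keys are standard HBase settings, and the concrete values for this test run are an assumption.

```java
// Hedged sketch: how the "Over memstore limit=512.0 K" blocking threshold is
// typically derived. Only the configuration keys are standard HBase settings;
// the concrete values used by this test run are an assumption.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region flush trigger (128 MB in a stock deployment).
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Writes are rejected with RegionTooBusyException once the memstore
    // exceeds flushSize * blockMultiplier (the multiplier defaults to 4).
    long blockMultiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    long blockingLimit = flushSize * blockMultiplier;
    System.out.println("Memstore blocking limit: " + blockingLimit + " bytes");
    // A 512 K limit, as in this log, would correspond to a test-scaled flush
    // size of 128 K with the default multiplier of 4 (assumption).
  }
}
```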
2024-11-20T22:25:33,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:33,713 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:33,714 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T22:25:33,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:33,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
as already flushing 2024-11-20T22:25:33,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:33,714 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:33,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:33,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:33,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:33,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141593717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:33,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:33,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141593717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:33,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T22:25:33,866 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:33,866 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T22:25:33,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:33,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:33,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:33,866 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
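On the client side, these RegionTooBusyException responses to Mutate calls are retried by the HBase client itself, which is what the later RpcRetryingCallerImpl "tries=6, retries=16" traces in this section record. Below is a hedged sketch of the corresponding application-level write path: the table, row, family, and qualifier names are taken from the log above, while the retry settings are illustrative.

```java
// Hedged sketch of the application-level write that the Mutate calls above
// correspond to. Table, row, family, and qualifier come from the log; the
// retry settings are illustrative examples, not the test's actual values.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAgainstBusyRegion {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The client retries RegionTooBusyException internally; these settings
    // bound how many attempts it makes and how long it pauses between them.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100);
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // While the region's memstore is over its blocking limit, each attempt
      // fails fast on the server and is retried with backoff on the client.
      table.put(put);
    }
  }
}
```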
2024-11-20T22:25:33,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:33,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:33,893 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/c770860fc8e74dfaaaa51edb91a3bdb3 2024-11-20T22:25:33,896 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/bfe1c16450524718a2ba3387ac84ad90 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/bfe1c16450524718a2ba3387ac84ad90 2024-11-20T22:25:33,899 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/bfe1c16450524718a2ba3387ac84ad90, entries=200, sequenceid=334, filesize=14.4 K 2024-11-20T22:25:33,900 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/c27a7c54395943fa9b66bd70b92dd0d5 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/c27a7c54395943fa9b66bd70b92dd0d5 2024-11-20T22:25:33,902 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/c27a7c54395943fa9b66bd70b92dd0d5, entries=150, sequenceid=334, filesize=12.0 K 2024-11-20T22:25:33,904 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/c770860fc8e74dfaaaa51edb91a3bdb3 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/c770860fc8e74dfaaaa51edb91a3bdb3 2024-11-20T22:25:33,906 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/c770860fc8e74dfaaaa51edb91a3bdb3, entries=150, sequenceid=334, filesize=12.0 K 2024-11-20T22:25:33,907 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 6f10e22f8d7346b15976f24fa4b38050 in 878ms, sequenceid=334, compaction requested=true 2024-11-20T22:25:33,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:33,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
6f10e22f8d7346b15976f24fa4b38050:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:33,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:33,907 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:33,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:33,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:33,907 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:33,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:33,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:33,908 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39957 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:33,908 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:33,908 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/A is initiating minor compaction (all files) 2024-11-20T22:25:33,908 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/B is initiating minor compaction (all files) 2024-11-20T22:25:33,908 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/A in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:33,908 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/B in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
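The compaction selection above (3 store files, 0 compacting, 3 eligible, 16 blocking; ExploringCompactionPolicy picking all three) is governed by a handful of standard store settings. The sketch below reads them; the defaults shown are stock HBase values, and assuming they apply unchanged to this test run is an assumption.

```java
// Hedged sketch of the standard store settings behind the selection above.
// Only the keys are standard HBase settings; the defaults shown are the stock
// values, and assuming this test uses them unchanged is an assumption.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionSettings {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible files before a minor compaction is scheduled.
    int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
    // Upper bound on how many files one minor compaction may merge.
    int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
    // At this many store files, further updates are blocked until compaction
    // catches up (the "16 blocking" in the log matches this default).
    int blockingFiles = conf.getInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.printf("compaction.min=%d compaction.max=%d blockingStoreFiles=%d%n",
        minFiles, maxFiles, blockingFiles);
  }
}
```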
2024-11-20T22:25:33,908 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/7f43ac5dee324cad8544cb79f2cd857d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/14a15e5c8a384ae09e6f9ceb6a771da3, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/bfe1c16450524718a2ba3387ac84ad90] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=39.0 K 2024-11-20T22:25:33,908 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/baf5f7850bef4fb3af519d2d81d83d74, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/3f7d7ed385f74190a56fa9d846274b79, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/c27a7c54395943fa9b66bd70b92dd0d5] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=36.7 K 2024-11-20T22:25:33,908 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f43ac5dee324cad8544cb79f2cd857d, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732141530476 2024-11-20T22:25:33,908 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting baf5f7850bef4fb3af519d2d81d83d74, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732141530476 2024-11-20T22:25:33,908 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14a15e5c8a384ae09e6f9ceb6a771da3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732141531158 2024-11-20T22:25:33,908 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f7d7ed385f74190a56fa9d846274b79, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732141531158 2024-11-20T22:25:33,909 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting bfe1c16450524718a2ba3387ac84ad90, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732141531878 2024-11-20T22:25:33,909 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting c27a7c54395943fa9b66bd70b92dd0d5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732141531878 2024-11-20T22:25:33,915 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#A#compaction#444 average throughput is 3.28 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:33,915 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#B#compaction#445 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:33,915 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/1bf1aef0573d4621ba6728f5d3133d6a is 50, key is test_row_0/A:col10/1732141533028/Put/seqid=0 2024-11-20T22:25:33,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742353_1529 (size=13017) 2024-11-20T22:25:33,925 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/47ff964aa70c42559f9e06e7d376a6fb is 50, key is test_row_0/B:col10/1732141533028/Put/seqid=0 2024-11-20T22:25:33,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742354_1530 (size=13119) 2024-11-20T22:25:34,017 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:34,017 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T22:25:34,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
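The remote flush procedure (pid=139) is dispatched again here and, as the next entries show, the region is no longer mid-flush so the flush proceeds. A flush like this can be requested through the standard Admin API; whether the test tool drives its flushes exactly this way is an assumption, but Admin.flush(TableName) is the usual client-facing entry point.

```java
// Hedged sketch: requesting a table flush through the standard Admin API.
// Whether the test tool drives its flushes exactly this way is an assumption;
// Admin.flush(TableName) is the usual client-facing entry point.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestTableFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Requests a flush of every region of the table; if a region is already
      // flushing, the remote flush procedure is retried, as the repeated
      // pid=139 attempts above show.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```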
2024-11-20T22:25:34,018 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T22:25:34,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:34,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:34,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:34,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:34,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:34,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:34,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/bdf0292f32b745049006ef1c42403572 is 50, key is test_row_0/A:col10/1732141533077/Put/seqid=0 2024-11-20T22:25:34,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742355_1531 (size=12301) 2024-11-20T22:25:34,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:34,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:34,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:34,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:34,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141594273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:34,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141594273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:34,322 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/1bf1aef0573d4621ba6728f5d3133d6a as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/1bf1aef0573d4621ba6728f5d3133d6a 2024-11-20T22:25:34,326 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:34,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141594320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:34,326 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:34,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141594320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:34,327 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:34,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141594323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:34,327 DEBUG [Thread-2036 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4178 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., hostname=6365a1e51efd,44631,1732141399950, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:25:34,327 DEBUG [Thread-2038 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4186 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., hostname=6365a1e51efd,44631,1732141399950, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:25:34,328 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/A of 6f10e22f8d7346b15976f24fa4b38050 into 1bf1aef0573d4621ba6728f5d3133d6a(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:34,328 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:34,328 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/A, priority=13, startTime=1732141533907; duration=0sec 2024-11-20T22:25:34,328 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:34,328 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:A 2024-11-20T22:25:34,328 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:34,329 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:34,329 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/C is initiating minor compaction (all files) 2024-11-20T22:25:34,329 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/C in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
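[Editorial note] The RpcRetryingCallerImpl records above (tries=6, retries=16, started=4186 ms ago) show the test's writer thread transparently retrying a put while region 6f10e22f8d7346b15976f24fa4b38050 rejects writes with RegionTooBusyException. Below is a minimal, hypothetical sketch of such a client call using the standard HBase client API; the table, row, and column-family names are taken from the log, the retry setting mirrors retries=16 above, and the real writer in this run is AcidGuaranteesTestTool$AtomicityWriter, not this class.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 16); // matches retries=16 in the log
    conf.setLong("hbase.client.pause", 100);        // base backoff between attempts, in ms
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_2"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // RegionTooBusyException is retried inside table.put(); the call only fails
      // once the configured number of retries has been exhausted.
      table.put(put);
    }
  }
}

The pause between attempts grows with each retry, which is consistent with roughly four seconds having elapsed after only six tries in the record above.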
2024-11-20T22:25:34,329 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/bf23b929857343a5a9236b6d60e7facf, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/139e1b645f1f41b69f863990ef31c40f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/c770860fc8e74dfaaaa51edb91a3bdb3] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=36.7 K 2024-11-20T22:25:34,329 DEBUG [Thread-2032 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4190 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., hostname=6365a1e51efd,44631,1732141399950, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:25:34,330 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting bf23b929857343a5a9236b6d60e7facf, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732141530476 2024-11-20T22:25:34,330 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 139e1b645f1f41b69f863990ef31c40f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732141531158 2024-11-20T22:25:34,333 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
c770860fc8e74dfaaaa51edb91a3bdb3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732141531878 2024-11-20T22:25:34,339 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/47ff964aa70c42559f9e06e7d376a6fb as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/47ff964aa70c42559f9e06e7d376a6fb 2024-11-20T22:25:34,342 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/B of 6f10e22f8d7346b15976f24fa4b38050 into 47ff964aa70c42559f9e06e7d376a6fb(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:34,342 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:34,342 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/B, priority=13, startTime=1732141533907; duration=0sec 2024-11-20T22:25:34,342 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:34,342 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:B 2024-11-20T22:25:34,343 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#C#compaction#447 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:34,343 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/6a60e1c3f80f4a5caa548c2c6e5ea94d is 50, key is test_row_0/C:col10/1732141533028/Put/seqid=0 2024-11-20T22:25:34,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T22:25:34,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742356_1532 (size=13119) 2024-11-20T22:25:34,387 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/6a60e1c3f80f4a5caa548c2c6e5ea94d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/6a60e1c3f80f4a5caa548c2c6e5ea94d 2024-11-20T22:25:34,391 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/C of 6f10e22f8d7346b15976f24fa4b38050 into 6a60e1c3f80f4a5caa548c2c6e5ea94d(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:34,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:34,391 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:34,391 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/C, priority=13, startTime=1732141533907; duration=0sec 2024-11-20T22:25:34,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141594384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:34,391 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:34,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:34,391 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:C 2024-11-20T22:25:34,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141594384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:34,424 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/bdf0292f32b745049006ef1c42403572 2024-11-20T22:25:34,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/0cceadd306d844caaa48d81ff580f431 is 50, key is test_row_0/B:col10/1732141533077/Put/seqid=0 2024-11-20T22:25:34,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742357_1533 (size=12301) 2024-11-20T22:25:34,475 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/0cceadd306d844caaa48d81ff580f431 2024-11-20T22:25:34,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/8974c75f93a54e03a6bdc14ef98fb326 is 50, key is test_row_0/C:col10/1732141533077/Put/seqid=0 2024-11-20T22:25:34,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742358_1534 (size=12301) 2024-11-20T22:25:34,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:34,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141594592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:34,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:34,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141594592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:34,893 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/8974c75f93a54e03a6bdc14ef98fb326 2024-11-20T22:25:34,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/bdf0292f32b745049006ef1c42403572 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/bdf0292f32b745049006ef1c42403572 2024-11-20T22:25:34,899 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:34,899 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/bdf0292f32b745049006ef1c42403572, entries=150, sequenceid=354, filesize=12.0 K 2024-11-20T22:25:34,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/0cceadd306d844caaa48d81ff580f431 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/0cceadd306d844caaa48d81ff580f431 2024-11-20T22:25:34,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141594897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:34,903 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:34,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141594898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:34,907 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/0cceadd306d844caaa48d81ff580f431, entries=150, sequenceid=354, filesize=12.0 K 2024-11-20T22:25:34,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/8974c75f93a54e03a6bdc14ef98fb326 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/8974c75f93a54e03a6bdc14ef98fb326 2024-11-20T22:25:34,910 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/8974c75f93a54e03a6bdc14ef98fb326, entries=150, sequenceid=354, filesize=12.0 K 2024-11-20T22:25:34,911 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 6f10e22f8d7346b15976f24fa4b38050 in 893ms, sequenceid=354, compaction requested=false 2024-11-20T22:25:34,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:34,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
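[Editorial note] The repeated "Over memstore limit=512.0 K" warnings, followed by the flush that writes ~114 KB out to the A/B/C stores and commits the .tmp files, illustrate HBase's write back-pressure: once a region's memstore exceeds its blocking limit (flush size times the block multiplier), puts are rejected with RegionTooBusyException until a flush drains it. The snippet below is a simplified, self-contained illustration of that check using the standard configuration keys; it is not the actual HRegion.checkResources implementation, and the 512 K figure is simply the value reported in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;

public class MemstoreLimitSketch {
  // Hypothetical helper mirroring the shape of the check, not HBase's own code.
  static void checkResources(long memStoreSizeBytes, long blockingLimit, String region)
      throws RegionTooBusyException {
    if (memStoreSizeBytes > blockingLimit) {
      throw new RegionTooBusyException(
          "Over memstore limit=" + blockingLimit + ", regionName=" + region);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Typical defaults are 128 MB and 4; this test run evidently lowers the flush
    // size so that the blocking limit works out to 512 K.
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    long blockingLimit = flushSize * multiplier;
    try {
      // A put arriving while the memstore is over the limit is rejected; the
      // client side then retries, as the earlier log records show.
      checkResources(blockingLimit + 1, blockingLimit, "6f10e22f8d7346b15976f24fa4b38050");
    } catch (RegionTooBusyException e) {
      System.out.println(e.getMessage()); // same shape as the WARN messages above
    }
  }
}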
2024-11-20T22:25:34,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-20T22:25:34,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-20T22:25:34,915 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-20T22:25:34,915 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6580 sec 2024-11-20T22:25:34,917 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 1.6620 sec 2024-11-20T22:25:35,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T22:25:35,364 INFO [Thread-2042 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-20T22:25:35,365 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:35,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-20T22:25:35,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T22:25:35,367 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:35,367 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:35,367 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:35,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:35,406 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T22:25:35,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:35,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:35,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:35,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-20T22:25:35,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:35,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:35,409 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/c65e1169849e41bd813526ae8e7d2e33 is 50, key is test_row_0/A:col10/1732141535405/Put/seqid=0 2024-11-20T22:25:35,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742359_1535 (size=14741) 2024-11-20T22:25:35,414 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/c65e1169849e41bd813526ae8e7d2e33 2024-11-20T22:25:35,430 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/f9bfb1f46eb242d5927e675cf190fc65 is 50, key is test_row_0/B:col10/1732141535405/Put/seqid=0 2024-11-20T22:25:35,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742360_1536 (size=12301) 2024-11-20T22:25:35,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T22:25:35,520 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:35,520 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T22:25:35,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:35,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:35,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:35,521 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:35,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:35,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:35,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:35,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141595527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:35,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:35,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141595528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:35,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:35,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141595634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:35,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:35,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141595634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:35,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T22:25:35,672 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:35,673 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T22:25:35,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:35,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:35,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:35,674 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:35,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:35,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:35,826 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:35,827 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T22:25:35,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:35,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:35,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:35,828 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:35,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:35,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:35,838 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/f9bfb1f46eb242d5927e675cf190fc65 2024-11-20T22:25:35,844 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/aef9b4cf1a69470fbff2ae6ad1e5a13f is 50, key is test_row_0/C:col10/1732141535405/Put/seqid=0 2024-11-20T22:25:35,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:35,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141595841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:35,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:35,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141595845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:35,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742361_1537 (size=12301) 2024-11-20T22:25:35,857 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/aef9b4cf1a69470fbff2ae6ad1e5a13f 2024-11-20T22:25:35,876 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/c65e1169849e41bd813526ae8e7d2e33 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/c65e1169849e41bd813526ae8e7d2e33 2024-11-20T22:25:35,893 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/c65e1169849e41bd813526ae8e7d2e33, entries=200, sequenceid=374, filesize=14.4 K 2024-11-20T22:25:35,894 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/f9bfb1f46eb242d5927e675cf190fc65 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/f9bfb1f46eb242d5927e675cf190fc65 2024-11-20T22:25:35,904 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/f9bfb1f46eb242d5927e675cf190fc65, entries=150, sequenceid=374, filesize=12.0 K 2024-11-20T22:25:35,905 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/aef9b4cf1a69470fbff2ae6ad1e5a13f as 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/aef9b4cf1a69470fbff2ae6ad1e5a13f 2024-11-20T22:25:35,916 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/aef9b4cf1a69470fbff2ae6ad1e5a13f, entries=150, sequenceid=374, filesize=12.0 K 2024-11-20T22:25:35,917 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 6f10e22f8d7346b15976f24fa4b38050 in 510ms, sequenceid=374, compaction requested=true 2024-11-20T22:25:35,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:35,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:35,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:35,917 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:35,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:35,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:35,917 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:35,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:35,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:35,918 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40059 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:35,918 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/A is initiating minor compaction (all files) 2024-11-20T22:25:35,919 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/A in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
2024-11-20T22:25:35,919 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/1bf1aef0573d4621ba6728f5d3133d6a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/bdf0292f32b745049006ef1c42403572, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/c65e1169849e41bd813526ae8e7d2e33] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=39.1 K 2024-11-20T22:25:35,919 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:35,919 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/B is initiating minor compaction (all files) 2024-11-20T22:25:35,919 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/B in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:35,919 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/47ff964aa70c42559f9e06e7d376a6fb, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/0cceadd306d844caaa48d81ff580f431, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/f9bfb1f46eb242d5927e675cf190fc65] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=36.8 K 2024-11-20T22:25:35,919 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1bf1aef0573d4621ba6728f5d3133d6a, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732141531878 2024-11-20T22:25:35,921 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 47ff964aa70c42559f9e06e7d376a6fb, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732141531878 2024-11-20T22:25:35,921 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 0cceadd306d844caaa48d81ff580f431, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732141533077 2024-11-20T22:25:35,922 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting f9bfb1f46eb242d5927e675cf190fc65, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732141534257 2024-11-20T22:25:35,922 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting bdf0292f32b745049006ef1c42403572, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732141533077 2024-11-20T22:25:35,922 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting c65e1169849e41bd813526ae8e7d2e33, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732141534257 2024-11-20T22:25:35,932 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#A#compaction#453 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:35,932 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/00f1e27366234fa18388e37dad535171 is 50, key is test_row_0/A:col10/1732141535405/Put/seqid=0 2024-11-20T22:25:35,940 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#B#compaction#454 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:35,941 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/d23211cfc21147ee9b1526bb3b9720e5 is 50, key is test_row_0/B:col10/1732141535405/Put/seqid=0 2024-11-20T22:25:35,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742362_1538 (size=13119) 2024-11-20T22:25:35,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742363_1539 (size=13221) 2024-11-20T22:25:35,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T22:25:35,981 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:35,981 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T22:25:35,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
2024-11-20T22:25:35,982 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T22:25:35,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:35,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:35,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:35,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:35,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:35,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:35,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/2ad0221cab114c98979d2dfca61dbca6 is 50, key is test_row_0/A:col10/1732141535526/Put/seqid=0 2024-11-20T22:25:35,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742364_1540 (size=12301) 2024-11-20T22:25:35,988 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/2ad0221cab114c98979d2dfca61dbca6 2024-11-20T22:25:35,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/ca2cae3b4564422692d464b4398b4c31 is 50, key is test_row_0/B:col10/1732141535526/Put/seqid=0 2024-11-20T22:25:35,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742365_1541 (size=12301) 2024-11-20T22:25:36,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:36,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
as already flushing 2024-11-20T22:25:36,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:36,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141596206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:36,212 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:36,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141596206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:36,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:36,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141596311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:36,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:36,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141596313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:36,353 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/d23211cfc21147ee9b1526bb3b9720e5 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/d23211cfc21147ee9b1526bb3b9720e5 2024-11-20T22:25:36,353 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/00f1e27366234fa18388e37dad535171 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/00f1e27366234fa18388e37dad535171 2024-11-20T22:25:36,359 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/B of 6f10e22f8d7346b15976f24fa4b38050 into d23211cfc21147ee9b1526bb3b9720e5(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:36,359 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/A of 6f10e22f8d7346b15976f24fa4b38050 into 00f1e27366234fa18388e37dad535171(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:36,359 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:36,359 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:36,359 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/A, priority=13, startTime=1732141535917; duration=0sec 2024-11-20T22:25:36,359 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/B, priority=13, startTime=1732141535917; duration=0sec 2024-11-20T22:25:36,359 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:36,359 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:B 2024-11-20T22:25:36,359 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:36,360 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:36,360 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:A 2024-11-20T22:25:36,360 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:36,360 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/C is initiating minor compaction (all files) 2024-11-20T22:25:36,360 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/C in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
2024-11-20T22:25:36,360 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/6a60e1c3f80f4a5caa548c2c6e5ea94d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/8974c75f93a54e03a6bdc14ef98fb326, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/aef9b4cf1a69470fbff2ae6ad1e5a13f] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=36.8 K 2024-11-20T22:25:36,361 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a60e1c3f80f4a5caa548c2c6e5ea94d, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732141531878 2024-11-20T22:25:36,361 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 8974c75f93a54e03a6bdc14ef98fb326, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732141533077 2024-11-20T22:25:36,362 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting aef9b4cf1a69470fbff2ae6ad1e5a13f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732141534257 2024-11-20T22:25:36,372 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#C#compaction#457 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:36,373 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/cc12c05b6591469b968377c1d99d07f0 is 50, key is test_row_0/C:col10/1732141535405/Put/seqid=0 2024-11-20T22:25:36,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742366_1542 (size=13221) 2024-11-20T22:25:36,408 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/ca2cae3b4564422692d464b4398b4c31 2024-11-20T22:25:36,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/870bb9f4d67549aab859562ee98be0da is 50, key is test_row_0/C:col10/1732141535526/Put/seqid=0 2024-11-20T22:25:36,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742367_1543 (size=12301) 2024-11-20T22:25:36,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T22:25:36,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:36,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141596514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:36,526 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:36,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141596521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:36,793 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/cc12c05b6591469b968377c1d99d07f0 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/cc12c05b6591469b968377c1d99d07f0 2024-11-20T22:25:36,804 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/C of 6f10e22f8d7346b15976f24fa4b38050 into cc12c05b6591469b968377c1d99d07f0(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:36,804 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:36,804 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/C, priority=13, startTime=1732141535917; duration=0sec 2024-11-20T22:25:36,804 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:36,804 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:C 2024-11-20T22:25:36,821 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/870bb9f4d67549aab859562ee98be0da 2024-11-20T22:25:36,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:25:36,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141596821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:25:36,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/2ad0221cab114c98979d2dfca61dbca6 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/2ad0221cab114c98979d2dfca61dbca6
2024-11-20T22:25:36,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:25:36,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141596828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:25:36,839 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/2ad0221cab114c98979d2dfca61dbca6, entries=150, sequenceid=394, filesize=12.0 K
2024-11-20T22:25:36,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/ca2cae3b4564422692d464b4398b4c31 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/ca2cae3b4564422692d464b4398b4c31
2024-11-20T22:25:36,843 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/ca2cae3b4564422692d464b4398b4c31, entries=150, sequenceid=394, filesize=12.0 K
2024-11-20T22:25:36,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/870bb9f4d67549aab859562ee98be0da as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/870bb9f4d67549aab859562ee98be0da
2024-11-20T22:25:36,846 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/870bb9f4d67549aab859562ee98be0da, entries=150, sequenceid=394, filesize=12.0 K
2024-11-20T22:25:36,847 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 6f10e22f8d7346b15976f24fa4b38050 in 865ms, sequenceid=394, compaction requested=false
2024-11-20T22:25:36,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050:
2024-11-20T22:25:36,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.
2024-11-20T22:25:36,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141
2024-11-20T22:25:36,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=141
2024-11-20T22:25:36,848 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140
2024-11-20T22:25:36,848 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4810 sec
2024-11-20T22:25:36,849 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 1.4840 sec
2024-11-20T22:25:37,334 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB
2024-11-20T22:25:37,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A
2024-11-20T22:25:37,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:25:37,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B
2024-11-20T22:25:37,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:25:37,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C
2024-11-20T22:25:37,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:25:37,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050
2024-11-20T22:25:37,341 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/1ecc00a4486242988ab6320dcf6683f6 is 50, key is test_row_0/A:col10/1732141537332/Put/seqid=0
2024-11-20T22:25:37,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742368_1544 (size=14741)
2024-11-20T22:25:37,346 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=414 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/1ecc00a4486242988ab6320dcf6683f6
2024-11-20T22:25:37,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/6d931755b819488bbd019176debb3fcc is 50, key is test_row_0/B:col10/1732141537332/Put/seqid=0
2024-11-20T22:25:37,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742369_1545 (size=12301)
2024-11-20T22:25:37,391 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=414 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/6d931755b819488bbd019176debb3fcc
2024-11-20T22:25:37,404 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/b6d747aa7c7c40dd80a926b33e4d4697 is 50, key is test_row_0/C:col10/1732141537332/Put/seqid=0
2024-11-20T22:25:37,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:25:37,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141597409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:25:37,419 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:25:37,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141597410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:25:37,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742370_1546 (size=12301)
2024-11-20T22:25:37,435 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=414 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/b6d747aa7c7c40dd80a926b33e4d4697
2024-11-20T22:25:37,441 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/1ecc00a4486242988ab6320dcf6683f6 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/1ecc00a4486242988ab6320dcf6683f6
2024-11-20T22:25:37,447 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/1ecc00a4486242988ab6320dcf6683f6, entries=200, sequenceid=414, filesize=14.4 K
2024-11-20T22:25:37,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140
2024-11-20T22:25:37,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/6d931755b819488bbd019176debb3fcc as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/6d931755b819488bbd019176debb3fcc
2024-11-20T22:25:37,478 INFO [Thread-2042 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed
2024-11-20T22:25:37,480 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-20T22:25:37,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees
2024-11-20T22:25:37,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142
2024-11-20T22:25:37,488 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-20T22:25:37,489 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T22:25:37,489 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T22:25:37,495 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/6d931755b819488bbd019176debb3fcc, entries=150, sequenceid=414, filesize=12.0 K
2024-11-20T22:25:37,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/b6d747aa7c7c40dd80a926b33e4d4697 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/b6d747aa7c7c40dd80a926b33e4d4697
2024-11-20T22:25:37,507 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/b6d747aa7c7c40dd80a926b33e4d4697, entries=150, sequenceid=414, filesize=12.0 K
2024-11-20T22:25:37,509 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 6f10e22f8d7346b15976f24fa4b38050 in 175ms, sequenceid=414, compaction requested=true
2024-11-20T22:25:37,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050:
2024-11-20T22:25:37,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:A, priority=-2147483648, current under compaction store size is 1
2024-11-20T22:25:37,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T22:25:37,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:B, priority=-2147483648, current under compaction store size is 2
2024-11-20T22:25:37,510 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T22:25:37,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-20T22:25:37,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:C, priority=-2147483648, current under compaction store size is 3
2024-11-20T22:25:37,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0
2024-11-20T22:25:37,510 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T22:25:37,513 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T22:25:37,513 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/B is initiating minor compaction (all files)
2024-11-20T22:25:37,513 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/B in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.
2024-11-20T22:25:37,514 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/d23211cfc21147ee9b1526bb3b9720e5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/ca2cae3b4564422692d464b4398b4c31, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/6d931755b819488bbd019176debb3fcc] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=36.9 K
2024-11-20T22:25:37,514 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40161 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T22:25:37,514 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/A is initiating minor compaction (all files)
2024-11-20T22:25:37,514 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/A in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.
2024-11-20T22:25:37,514 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/00f1e27366234fa18388e37dad535171, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/2ad0221cab114c98979d2dfca61dbca6, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/1ecc00a4486242988ab6320dcf6683f6] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=39.2 K
2024-11-20T22:25:37,514 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting d23211cfc21147ee9b1526bb3b9720e5, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732141534257
2024-11-20T22:25:37,514 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 00f1e27366234fa18388e37dad535171, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732141534257
2024-11-20T22:25:37,515 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting ca2cae3b4564422692d464b4398b4c31, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732141535494
2024-11-20T22:25:37,516 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ad0221cab114c98979d2dfca61dbca6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732141535494
2024-11-20T22:25:37,516 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d931755b819488bbd019176debb3fcc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1732141536181
2024-11-20T22:25:37,516 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ecc00a4486242988ab6320dcf6683f6, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1732141536181
2024-11-20T22:25:37,525 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB
2024-11-20T22:25:37,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A
2024-11-20T22:25:37,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:25:37,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B
2024-11-20T22:25:37,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:25:37,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C
2024-11-20T22:25:37,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:25:37,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050
2024-11-20T22:25:37,545 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#A#compaction#462 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T22:25:37,545 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/a98ce1e0168240078f5151b40fcb6a1b is 50, key is test_row_0/A:col10/1732141537332/Put/seqid=0
2024-11-20T22:25:37,552 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/5b3a5b93bd0041fe8efb634c34655f69 is 50, key is test_row_0/A:col10/1732141537384/Put/seqid=0
2024-11-20T22:25:37,570 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#B#compaction#464 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T22:25:37,570 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/a91d6f112c1540d7bfcf1026389d4161 is 50, key is test_row_0/B:col10/1732141537332/Put/seqid=0
2024-11-20T22:25:37,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742371_1547 (size=13221)
2024-11-20T22:25:37,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142
2024-11-20T22:25:37,590 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/a98ce1e0168240078f5151b40fcb6a1b as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/a98ce1e0168240078f5151b40fcb6a1b
2024-11-20T22:25:37,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742373_1549 (size=13323)
2024-11-20T22:25:37,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742372_1548 (size=14741)
2024-11-20T22:25:37,594 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/5b3a5b93bd0041fe8efb634c34655f69
2024-11-20T22:25:37,601 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/A of 6f10e22f8d7346b15976f24fa4b38050 into a98ce1e0168240078f5151b40fcb6a1b(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T22:25:37,601 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050:
2024-11-20T22:25:37,601 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/A, priority=13, startTime=1732141537509; duration=0sec
2024-11-20T22:25:37,601 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-20T22:25:37,601 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:A
2024-11-20T22:25:37,601 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T22:25:37,605 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/a91d6f112c1540d7bfcf1026389d4161 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/a91d6f112c1540d7bfcf1026389d4161
2024-11-20T22:25:37,605 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T22:25:37,605 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/C is initiating minor compaction (all files)
2024-11-20T22:25:37,605 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/C in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.
2024-11-20T22:25:37,605 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/cc12c05b6591469b968377c1d99d07f0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/870bb9f4d67549aab859562ee98be0da, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/b6d747aa7c7c40dd80a926b33e4d4697] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=36.9 K
2024-11-20T22:25:37,607 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/08baf3ea002c49ad975ca8c03e06d81d is 50, key is test_row_0/B:col10/1732141537384/Put/seqid=0
2024-11-20T22:25:37,608 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc12c05b6591469b968377c1d99d07f0, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732141534257
2024-11-20T22:25:37,608 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 870bb9f4d67549aab859562ee98be0da, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732141535494
2024-11-20T22:25:37,609 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6d747aa7c7c40dd80a926b33e4d4697, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1732141536181
2024-11-20T22:25:37,611 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/B of 6f10e22f8d7346b15976f24fa4b38050 into a91d6f112c1540d7bfcf1026389d4161(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T22:25:37,611 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050:
2024-11-20T22:25:37,611 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/B, priority=13, startTime=1732141537510; duration=0sec
2024-11-20T22:25:37,612 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T22:25:37,612 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:B
2024-11-20T22:25:37,616 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#C#compaction#466 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T22:25:37,617 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/20fec8933e19494faa08e65ceb595d76 is 50, key is test_row_0/C:col10/1732141537332/Put/seqid=0
2024-11-20T22:25:37,619 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:25:37,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141597606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:25:37,619 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:25:37,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141597607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:25:37,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742374_1550 (size=12301)
2024-11-20T22:25:37,635 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/08baf3ea002c49ad975ca8c03e06d81d
2024-11-20T22:25:37,641 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950
2024-11-20T22:25:37,641 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143
2024-11-20T22:25:37,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.
2024-11-20T22:25:37,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing
2024-11-20T22:25:37,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.
2024-11-20T22:25:37,642 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143
java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T22:25:37,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143
java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T22:25:37,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=143
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T22:25:37,664 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/160f2e7555974b1abd6749908fb7f9a0 is 50, key is test_row_0/C:col10/1732141537384/Put/seqid=0
2024-11-20T22:25:37,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742375_1551 (size=13323)
2024-11-20T22:25:37,675 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/20fec8933e19494faa08e65ceb595d76 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/20fec8933e19494faa08e65ceb595d76
2024-11-20T22:25:37,683 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/C of 6f10e22f8d7346b15976f24fa4b38050 into 20fec8933e19494faa08e65ceb595d76(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T22:25:37,684 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050:
2024-11-20T22:25:37,684 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/C, priority=13, startTime=1732141537510; duration=0sec
2024-11-20T22:25:37,684 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T22:25:37,684 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:C
2024-11-20T22:25:37,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742376_1552 (size=12301)
2024-11-20T22:25:37,735 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:25:37,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141597723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:25:37,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:25:37,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141597724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:25:37,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142
2024-11-20T22:25:37,794 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950
2024-11-20T22:25:37,795 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143
2024-11-20T22:25:37,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.
2024-11-20T22:25:37,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing
2024-11-20T22:25:37,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.
2024-11-20T22:25:37,796 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143
java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T22:25:37,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:37,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:37,941 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:37,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141597938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:37,941 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:37,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141597938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:37,950 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:37,950 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T22:25:37,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:37,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:37,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:37,950 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:37,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:37,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:38,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T22:25:38,102 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/160f2e7555974b1abd6749908fb7f9a0 2024-11-20T22:25:38,105 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:38,106 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T22:25:38,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:38,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:38,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/5b3a5b93bd0041fe8efb634c34655f69 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/5b3a5b93bd0041fe8efb634c34655f69 2024-11-20T22:25:38,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:38,106 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:38,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:38,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:38,113 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/5b3a5b93bd0041fe8efb634c34655f69, entries=200, sequenceid=434, filesize=14.4 K 2024-11-20T22:25:38,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/08baf3ea002c49ad975ca8c03e06d81d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/08baf3ea002c49ad975ca8c03e06d81d 2024-11-20T22:25:38,122 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/08baf3ea002c49ad975ca8c03e06d81d, entries=150, sequenceid=434, filesize=12.0 K 2024-11-20T22:25:38,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/160f2e7555974b1abd6749908fb7f9a0 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/160f2e7555974b1abd6749908fb7f9a0 2024-11-20T22:25:38,135 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/160f2e7555974b1abd6749908fb7f9a0, entries=150, sequenceid=434, filesize=12.0 K 2024-11-20T22:25:38,143 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 6f10e22f8d7346b15976f24fa4b38050 in 618ms, sequenceid=434, compaction requested=false 2024-11-20T22:25:38,143 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:38,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:38,249 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T22:25:38,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:38,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:38,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:38,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:38,250 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:38,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:38,258 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:38,259 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T22:25:38,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:38,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:38,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:38,259 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:38,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:38,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:38,264 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/4f318e0226264e1393defb66a9456295 is 50, key is test_row_0/A:col10/1732141538248/Put/seqid=0 2024-11-20T22:25:38,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742377_1553 (size=14741) 2024-11-20T22:25:38,303 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=454 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/4f318e0226264e1393defb66a9456295 2024-11-20T22:25:38,324 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/37f9cf58c1be457f9392fa4db9360a20 is 50, key is test_row_0/B:col10/1732141538248/Put/seqid=0 2024-11-20T22:25:38,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742378_1554 (size=12301) 2024-11-20T22:25:38,351 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44054 deadline: 1732141598349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:38,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44022 deadline: 1732141598349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:38,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141598349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:38,352 DEBUG [Thread-2038 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8211 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., hostname=6365a1e51efd,44631,1732141399950, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:25:38,352 DEBUG [Thread-2036 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8204 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., hostname=6365a1e51efd,44631,1732141399950, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:25:38,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141598351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:38,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141598351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:38,412 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:38,412 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T22:25:38,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:38,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:38,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:38,413 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:38,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:38,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:38,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141598454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:38,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141598461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:38,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141598461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:38,564 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:38,565 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T22:25:38,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
2024-11-20T22:25:38,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:38,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:38,565 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:38,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:38,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:38,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T22:25:38,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141598662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:38,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141598664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:38,673 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141598665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:38,716 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:38,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T22:25:38,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:38,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:38,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:38,717 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:38,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:38,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:38,748 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=454 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/37f9cf58c1be457f9392fa4db9360a20 2024-11-20T22:25:38,798 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/0c89314911014282be4071478bec93c2 is 50, key is test_row_0/C:col10/1732141538248/Put/seqid=0 2024-11-20T22:25:38,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742379_1555 (size=12301) 2024-11-20T22:25:38,872 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:38,872 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T22:25:38,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:38,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:38,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:38,873 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:38,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:38,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:38,979 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141598967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:38,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141598975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:38,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141598977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:39,027 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:39,027 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T22:25:39,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:39,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:39,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:39,027 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:39,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:39,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:39,184 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:39,184 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T22:25:39,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:39,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:39,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:39,185 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:39,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:39,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:39,261 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=454 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/0c89314911014282be4071478bec93c2 2024-11-20T22:25:39,265 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/4f318e0226264e1393defb66a9456295 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/4f318e0226264e1393defb66a9456295 2024-11-20T22:25:39,275 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/4f318e0226264e1393defb66a9456295, entries=200, sequenceid=454, filesize=14.4 K 2024-11-20T22:25:39,276 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/37f9cf58c1be457f9392fa4db9360a20 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/37f9cf58c1be457f9392fa4db9360a20 2024-11-20T22:25:39,282 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/37f9cf58c1be457f9392fa4db9360a20, entries=150, sequenceid=454, filesize=12.0 K 2024-11-20T22:25:39,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/0c89314911014282be4071478bec93c2 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0c89314911014282be4071478bec93c2 2024-11-20T22:25:39,288 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0c89314911014282be4071478bec93c2, entries=150, sequenceid=454, filesize=12.0 K 2024-11-20T22:25:39,291 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 6f10e22f8d7346b15976f24fa4b38050 in 1042ms, sequenceid=454, compaction requested=true 2024-11-20T22:25:39,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:39,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:39,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:39,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:39,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:25:39,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:39,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-20T22:25:39,291 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:39,292 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:39,292 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42703 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:39,292 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/A is initiating minor compaction (all files) 2024-11-20T22:25:39,292 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/A in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:39,293 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/a98ce1e0168240078f5151b40fcb6a1b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/5b3a5b93bd0041fe8efb634c34655f69, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/4f318e0226264e1393defb66a9456295] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=41.7 K 2024-11-20T22:25:39,293 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:39,293 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/C is initiating minor compaction (all files) 2024-11-20T22:25:39,293 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/C in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
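Aside: the "Exploring compaction algorithm has selected 3 files ..." lines above are the store-file selection step. As a rough illustration only (a simplified sketch, not the actual ExploringCompactionPolicy source), the core eligibility test is a size-ratio check: no candidate file may be larger than the configured ratio (hbase.hstore.compaction.ratio, default 1.2) times the combined size of the other candidates.

```java
import java.util.List;

public class CompactionRatioCheck {
  // Simplified "files in ratio" test: every candidate must be no larger than
  // ratio * (sum of the other candidates' sizes). The real policy also weighs
  // file counts and total size; this only illustrates the ratio rule.
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate sizes of the three A-family files selected above
    // (12.9 K + 14.4 K + 14.4 K, about 42,703 bytes total).
    System.out.println(filesInRatio(List.of(13_209L, 14_747L, 14_747L), 1.2));
  }
}
```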
2024-11-20T22:25:39,293 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/20fec8933e19494faa08e65ceb595d76, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/160f2e7555974b1abd6749908fb7f9a0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0c89314911014282be4071478bec93c2] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=37.0 K 2024-11-20T22:25:39,293 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting a98ce1e0168240078f5151b40fcb6a1b, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1732141536181 2024-11-20T22:25:39,294 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b3a5b93bd0041fe8efb634c34655f69, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1732141537384 2024-11-20T22:25:39,294 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 20fec8933e19494faa08e65ceb595d76, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1732141536181 2024-11-20T22:25:39,294 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 160f2e7555974b1abd6749908fb7f9a0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1732141537384 2024-11-20T22:25:39,294 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f318e0226264e1393defb66a9456295, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1732141537556 2024-11-20T22:25:39,295 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c89314911014282be4071478bec93c2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1732141537556 2024-11-20T22:25:39,320 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#C#compaction#471 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:39,321 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/7879871e9f4e42ba93f6f3af60019161 is 50, key is test_row_0/C:col10/1732141538248/Put/seqid=0 2024-11-20T22:25:39,333 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#A#compaction#472 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:39,333 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/64503a1eb8ed4ee68b8b12535378eaaf is 50, key is test_row_0/A:col10/1732141538248/Put/seqid=0 2024-11-20T22:25:39,339 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:39,342 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T22:25:39,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:39,343 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T22:25:39,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:39,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:39,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:39,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:39,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:39,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:39,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742381_1557 (size=13323) 2024-11-20T22:25:39,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742380_1556 (size=13425) 2024-11-20T22:25:39,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/10b07cd84dc045f581567f7a2a737635 is 50, key is test_row_0/A:col10/1732141538337/Put/seqid=0 2024-11-20T22:25:39,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742382_1558 
(size=12301) 2024-11-20T22:25:39,459 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/10b07cd84dc045f581567f7a2a737635 2024-11-20T22:25:39,498 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. as already flushing 2024-11-20T22:25:39,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:39,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/36e1b54baad541b2abddcc66a04e9ada is 50, key is test_row_0/B:col10/1732141538337/Put/seqid=0 2024-11-20T22:25:39,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742383_1559 (size=12301) 2024-11-20T22:25:39,558 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/36e1b54baad541b2abddcc66a04e9ada 2024-11-20T22:25:39,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141599545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:39,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141599547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:39,572 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141599551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:39,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T22:25:39,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/17d240580875482d8eb117cde66e8109 is 50, key is test_row_0/C:col10/1732141538337/Put/seqid=0 2024-11-20T22:25:39,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742384_1560 (size=12301) 2024-11-20T22:25:39,665 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/17d240580875482d8eb117cde66e8109 2024-11-20T22:25:39,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44068 deadline: 1732141599666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:39,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44052 deadline: 1732141599675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:39,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43988 deadline: 1732141599682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:39,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/10b07cd84dc045f581567f7a2a737635 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/10b07cd84dc045f581567f7a2a737635 2024-11-20T22:25:39,707 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/10b07cd84dc045f581567f7a2a737635, entries=150, sequenceid=474, filesize=12.0 K 2024-11-20T22:25:39,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/36e1b54baad541b2abddcc66a04e9ada as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/36e1b54baad541b2abddcc66a04e9ada 2024-11-20T22:25:39,714 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/36e1b54baad541b2abddcc66a04e9ada, entries=150, sequenceid=474, filesize=12.0 K 2024-11-20T22:25:39,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/17d240580875482d8eb117cde66e8109 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/17d240580875482d8eb117cde66e8109 2024-11-20T22:25:39,729 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/17d240580875482d8eb117cde66e8109, entries=150, sequenceid=474, filesize=12.0 K 2024-11-20T22:25:39,735 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 6f10e22f8d7346b15976f24fa4b38050 in 392ms, sequenceid=474, compaction requested=true 2024-11-20T22:25:39,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:39,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:39,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-11-20T22:25:39,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-11-20T22:25:39,738 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-20T22:25:39,738 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2470 sec 2024-11-20T22:25:39,739 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 2.2580 sec 2024-11-20T22:25:39,740 DEBUG [Thread-2045 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x40302925 to 127.0.0.1:51822 2024-11-20T22:25:39,740 DEBUG [Thread-2045 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:39,743 DEBUG [Thread-2051 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x02430fee to 127.0.0.1:51822 2024-11-20T22:25:39,743 DEBUG [Thread-2051 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:39,744 DEBUG [Thread-2043 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x14088aa9 to 127.0.0.1:51822 2024-11-20T22:25:39,744 DEBUG [Thread-2043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:39,756 DEBUG [Thread-2049 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x567011a8 to 127.0.0.1:51822 2024-11-20T22:25:39,756 DEBUG [Thread-2049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:39,756 DEBUG [Thread-2047 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x47ef9951 to 
127.0.0.1:51822 2024-11-20T22:25:39,756 DEBUG [Thread-2047 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:39,770 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/64503a1eb8ed4ee68b8b12535378eaaf as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/64503a1eb8ed4ee68b8b12535378eaaf 2024-11-20T22:25:39,774 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/A of 6f10e22f8d7346b15976f24fa4b38050 into 64503a1eb8ed4ee68b8b12535378eaaf(size=13.0 K), total size for store is 25.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:39,774 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:39,775 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/A, priority=13, startTime=1732141539291; duration=0sec 2024-11-20T22:25:39,775 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:39,775 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:A 2024-11-20T22:25:39,775 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:25:39,777 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50226 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:25:39,777 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/B is initiating minor compaction (all files) 2024-11-20T22:25:39,777 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/B in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
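Aside: the RegionTooBusyException warnings further up ("Over memstore limit=512.0 K") are the region refusing writes once its memstore passes the blocking limit, which is the per-region flush size multiplied by the block multiplier. A small sketch of how that limit is derived from configuration; the property names and defaults are the standard HBase ones, and the 512 K figure implies this test runs with a much smaller flush size than the 128 MB default.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore flush threshold (default 128 MB) and the blocking
    // multiplier (default 4). Writes are rejected with RegionTooBusyException
    // once a region's memstore grows past flushSize * multiplier.
    long flushSize =
        conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    System.out.println("writes block above ~" + (flushSize * multiplier) + " bytes per region");
  }
}
```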
2024-11-20T22:25:39,777 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/a91d6f112c1540d7bfcf1026389d4161, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/08baf3ea002c49ad975ca8c03e06d81d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/37f9cf58c1be457f9392fa4db9360a20, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/36e1b54baad541b2abddcc66a04e9ada] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=49.0 K 2024-11-20T22:25:39,777 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting a91d6f112c1540d7bfcf1026389d4161, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1732141536181 2024-11-20T22:25:39,778 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 08baf3ea002c49ad975ca8c03e06d81d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1732141537384 2024-11-20T22:25:39,778 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 37f9cf58c1be457f9392fa4db9360a20, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1732141537556 2024-11-20T22:25:39,778 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 36e1b54baad541b2abddcc66a04e9ada, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732141538324 2024-11-20T22:25:39,798 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#B#compaction#476 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:39,803 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/96c2226c8b87497fa7d935e25f183160 is 50, key is test_row_0/B:col10/1732141538337/Put/seqid=0 2024-11-20T22:25:39,804 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/7879871e9f4e42ba93f6f3af60019161 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/7879871e9f4e42ba93f6f3af60019161 2024-11-20T22:25:39,809 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/C of 6f10e22f8d7346b15976f24fa4b38050 into 7879871e9f4e42ba93f6f3af60019161(size=13.1 K), total size for store is 25.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:39,809 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:39,810 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/C, priority=13, startTime=1732141539291; duration=0sec 2024-11-20T22:25:39,810 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:39,810 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:C 2024-11-20T22:25:39,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742385_1561 (size=13459) 2024-11-20T22:25:39,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:39,888 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T22:25:39,889 DEBUG [Thread-2032 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x38dd8644 to 127.0.0.1:51822 2024-11-20T22:25:39,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:39,889 DEBUG [Thread-2032 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:39,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:39,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:39,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
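Aside: the "FLUSHING TO DISK ... store=A/B/C" and "Swapping pipeline suffix" lines come from CompactingMemStore, the memstore implementation used when a column family has in-memory compaction enabled. For context only, a three-family table with that behaviour could be declared as below; this is an illustrative sketch, the test tool builds its table in its own way.

```java
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateThreeFamilyTable {
  static void createTable(Admin admin) throws Exception {
    TableDescriptorBuilder table =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
    for (String family : new String[] {"A", "B", "C"}) {
      table.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              // With in-memory compaction enabled, the store uses a
              // CompactingMemStore and keeps a pipeline of in-memory segments,
              // which is what the pipeline-swap messages above refer to.
              .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
              .build());
    }
    admin.createTable(table.build());
  }
}
```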
2024-11-20T22:25:39,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:39,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:39,893 DEBUG [Thread-2040 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x39b3baa5 to 127.0.0.1:51822 2024-11-20T22:25:39,893 DEBUG [Thread-2040 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:39,894 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/b6ec1fd560214a5b94cd2aaa8813255f is 50, key is test_row_0/A:col10/1732141539528/Put/seqid=0 2024-11-20T22:25:39,900 DEBUG [Thread-2034 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x65e17c26 to 127.0.0.1:51822 2024-11-20T22:25:39,900 DEBUG [Thread-2034 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:39,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742386_1562 (size=12301) 2024-11-20T22:25:40,252 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/96c2226c8b87497fa7d935e25f183160 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/96c2226c8b87497fa7d935e25f183160 2024-11-20T22:25:40,258 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/B of 6f10e22f8d7346b15976f24fa4b38050 into 96c2226c8b87497fa7d935e25f183160(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:40,258 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:40,258 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/B, priority=12, startTime=1732141539291; duration=0sec 2024-11-20T22:25:40,258 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:40,258 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:B 2024-11-20T22:25:40,311 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=493 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/b6ec1fd560214a5b94cd2aaa8813255f 2024-11-20T22:25:40,320 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/c994013816ae4082b41583b514e1fc6f is 50, key is test_row_0/B:col10/1732141539528/Put/seqid=0 2024-11-20T22:25:40,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742387_1563 (size=12301) 2024-11-20T22:25:40,732 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=493 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/c994013816ae4082b41583b514e1fc6f 2024-11-20T22:25:40,744 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/0defeed45d1f4553a9da3aedd5d56820 is 50, key is test_row_0/C:col10/1732141539528/Put/seqid=0 2024-11-20T22:25:40,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742388_1564 (size=12301) 2024-11-20T22:25:41,158 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=493 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/0defeed45d1f4553a9da3aedd5d56820 2024-11-20T22:25:41,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/b6ec1fd560214a5b94cd2aaa8813255f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/b6ec1fd560214a5b94cd2aaa8813255f 2024-11-20T22:25:41,200 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/b6ec1fd560214a5b94cd2aaa8813255f, entries=150, sequenceid=493, filesize=12.0 K 2024-11-20T22:25:41,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/c994013816ae4082b41583b514e1fc6f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/c994013816ae4082b41583b514e1fc6f 2024-11-20T22:25:41,212 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/c994013816ae4082b41583b514e1fc6f, entries=150, sequenceid=493, filesize=12.0 K 2024-11-20T22:25:41,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/0defeed45d1f4553a9da3aedd5d56820 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0defeed45d1f4553a9da3aedd5d56820 2024-11-20T22:25:41,219 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0defeed45d1f4553a9da3aedd5d56820, entries=150, sequenceid=493, filesize=12.0 K 2024-11-20T22:25:41,219 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=13.42 KB/13740 for 6f10e22f8d7346b15976f24fa4b38050 in 1331ms, sequenceid=493, compaction requested=true 2024-11-20T22:25:41,219 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:41,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:41,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:41,220 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:41,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:41,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:41,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6f10e22f8d7346b15976f24fa4b38050:C, priority=-2147483648, current under compaction 
store size is 3 2024-11-20T22:25:41,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:25:41,222 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:41,230 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:41,230 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/A is initiating minor compaction (all files) 2024-11-20T22:25:41,230 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/A in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:41,231 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/64503a1eb8ed4ee68b8b12535378eaaf, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/10b07cd84dc045f581567f7a2a737635, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/b6ec1fd560214a5b94cd2aaa8813255f] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=37.0 K 2024-11-20T22:25:41,238 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 64503a1eb8ed4ee68b8b12535378eaaf, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1732141537556 2024-11-20T22:25:41,238 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:41,238 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): 6f10e22f8d7346b15976f24fa4b38050/C is initiating minor compaction (all files) 2024-11-20T22:25:41,238 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6f10e22f8d7346b15976f24fa4b38050/C in TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
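Aside: each flush above ends with "compaction requested=true" because adding one new HFile per store pushes the store to or past the minimum file count for a minor compaction. A tiny sketch of that trigger using the standard property and its default; the "Need 3 to initiate" message further down reflects the same threshold.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTriggerCheck {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is
    // requested (default 3).
    int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
    int storeFileCount = 3; // e.g. the three files a store holds after the flushes above
    System.out.println("compaction requested: " + (storeFileCount >= minFiles));
  }
}
```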
2024-11-20T22:25:41,238 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/7879871e9f4e42ba93f6f3af60019161, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/17d240580875482d8eb117cde66e8109, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0defeed45d1f4553a9da3aedd5d56820] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp, totalSize=37.1 K 2024-11-20T22:25:41,241 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10b07cd84dc045f581567f7a2a737635, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732141538324 2024-11-20T22:25:41,241 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 7879871e9f4e42ba93f6f3af60019161, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1732141537556 2024-11-20T22:25:41,247 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6ec1fd560214a5b94cd2aaa8813255f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=493, earliestPutTs=1732141539528 2024-11-20T22:25:41,249 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 17d240580875482d8eb117cde66e8109, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732141538324 2024-11-20T22:25:41,254 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 0defeed45d1f4553a9da3aedd5d56820, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=493, earliestPutTs=1732141539528 2024-11-20T22:25:41,267 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#A#compaction#480 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:41,268 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/2d910dfa034646a193ba350752555ddc is 50, key is test_row_0/A:col10/1732141539528/Put/seqid=0 2024-11-20T22:25:41,270 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f10e22f8d7346b15976f24fa4b38050#C#compaction#481 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:41,270 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/17f48c4301024c51ab98ba2579feab32 is 50, key is test_row_0/C:col10/1732141539528/Put/seqid=0 2024-11-20T22:25:41,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742389_1565 (size=13425) 2024-11-20T22:25:41,276 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/2d910dfa034646a193ba350752555ddc as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/2d910dfa034646a193ba350752555ddc 2024-11-20T22:25:41,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742390_1566 (size=13527) 2024-11-20T22:25:41,284 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/A of 6f10e22f8d7346b15976f24fa4b38050 into 2d910dfa034646a193ba350752555ddc(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:41,284 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/17f48c4301024c51ab98ba2579feab32 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/17f48c4301024c51ab98ba2579feab32 2024-11-20T22:25:41,284 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:41,284 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/A, priority=13, startTime=1732141541219; duration=0sec 2024-11-20T22:25:41,284 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:41,284 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:A 2024-11-20T22:25:41,284 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-20T22:25:41,286 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-20T22:25:41,286 DEBUG 
[RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-20T22:25:41,286 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. because compaction request was cancelled 2024-11-20T22:25:41,286 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:B 2024-11-20T22:25:41,305 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6f10e22f8d7346b15976f24fa4b38050/C of 6f10e22f8d7346b15976f24fa4b38050 into 17f48c4301024c51ab98ba2579feab32(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:41,305 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:41,305 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050., storeName=6f10e22f8d7346b15976f24fa4b38050/C, priority=13, startTime=1732141541220; duration=0sec 2024-11-20T22:25:41,305 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:41,305 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f10e22f8d7346b15976f24fa4b38050:C 2024-11-20T22:25:41,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T22:25:41,599 INFO [Thread-2042 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-11-20T22:25:48,088 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T22:25:48,372 DEBUG [Thread-2036 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53fc02ba to 127.0.0.1:51822 2024-11-20T22:25:48,372 DEBUG [Thread-2036 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:48,415 DEBUG [Thread-2038 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2011d733 to 127.0.0.1:51822 2024-11-20T22:25:48,415 DEBUG [Thread-2038 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:48,415 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-11-20T22:25:48,415 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 50
2024-11-20T22:25:48,415 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 130
2024-11-20T22:25:48,415 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 45
2024-11-20T22:25:48,415 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 43
2024-11-20T22:25:48,415 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 121
2024-11-20T22:25:48,415 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-20T22:25:48,415 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-20T22:25:48,415 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1331
2024-11-20T22:25:48,415 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3993 rows
2024-11-20T22:25:48,415 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1349
2024-11-20T22:25:48,415 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4047 rows
2024-11-20T22:25:48,415 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1338
2024-11-20T22:25:48,415 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4014 rows
2024-11-20T22:25:48,415 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1339
2024-11-20T22:25:48,415 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4017 rows
2024-11-20T22:25:48,415 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1342
2024-11-20T22:25:48,415 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4026 rows
2024-11-20T22:25:48,415 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-20T22:25:48,415 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x59daaa82 to 127.0.0.1:51822
2024-11-20T22:25:48,415 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T22:25:48,417 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-20T22:25:48,418 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-20T22:25:48,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-20T22:25:48,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144
2024-11-20T22:25:48,422 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141548421"}]},"ts":"1732141548421"}
2024-11-20T22:25:48,423 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-20T22:25:48,460 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-20T22:25:48,462 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-20T22:25:48,464 INFO [PEWorker-1 {}]
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6f10e22f8d7346b15976f24fa4b38050, UNASSIGN}] 2024-11-20T22:25:48,465 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6f10e22f8d7346b15976f24fa4b38050, UNASSIGN 2024-11-20T22:25:48,466 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=6f10e22f8d7346b15976f24fa4b38050, regionState=CLOSING, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:48,467 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T22:25:48,467 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41349 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=CLOSING, location=6365a1e51efd,44631,1732141399950, table=TestAcidGuarantees, region=6f10e22f8d7346b15976f24fa4b38050. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-20T22:25:48,467 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; CloseRegionProcedure 6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950}] 2024-11-20T22:25:48,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T22:25:48,618 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:48,619 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(124): Close 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:48,619 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T22:25:48,619 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1681): Closing 6f10e22f8d7346b15976f24fa4b38050, disabling compactions & flushes 2024-11-20T22:25:48,619 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:48,619 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 2024-11-20T22:25:48,619 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. after waiting 0 ms 2024-11-20T22:25:48,619 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
2024-11-20T22:25:48,620 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(2837): Flushing 6f10e22f8d7346b15976f24fa4b38050 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T22:25:48,620 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=A 2024-11-20T22:25:48,620 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:48,620 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=B 2024-11-20T22:25:48,620 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:48,620 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6f10e22f8d7346b15976f24fa4b38050, store=C 2024-11-20T22:25:48,620 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:48,628 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/e522fe8761634db490a847997085e9eb is 50, key is test_row_1/A:col10/1732141539898/Put/seqid=0 2024-11-20T22:25:48,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742391_1567 (size=9857) 2024-11-20T22:25:48,672 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=503 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/e522fe8761634db490a847997085e9eb 2024-11-20T22:25:48,708 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/e4c8626c0fcd42e08d34f52928cf4f24 is 50, key is test_row_1/B:col10/1732141539898/Put/seqid=0 2024-11-20T22:25:48,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T22:25:48,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742392_1568 (size=9857) 2024-11-20T22:25:48,726 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=503 (bloomFilter=true), 
to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/e4c8626c0fcd42e08d34f52928cf4f24 2024-11-20T22:25:48,745 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/543acdb859c34b898c03d82147bec93f is 50, key is test_row_1/C:col10/1732141539898/Put/seqid=0 2024-11-20T22:25:48,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742393_1569 (size=9857) 2024-11-20T22:25:48,763 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=503 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/543acdb859c34b898c03d82147bec93f 2024-11-20T22:25:48,782 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/A/e522fe8761634db490a847997085e9eb as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/e522fe8761634db490a847997085e9eb 2024-11-20T22:25:48,809 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/e522fe8761634db490a847997085e9eb, entries=100, sequenceid=503, filesize=9.6 K 2024-11-20T22:25:48,809 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/B/e4c8626c0fcd42e08d34f52928cf4f24 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/e4c8626c0fcd42e08d34f52928cf4f24 2024-11-20T22:25:48,816 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/e4c8626c0fcd42e08d34f52928cf4f24, entries=100, sequenceid=503, filesize=9.6 K 2024-11-20T22:25:48,827 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/.tmp/C/543acdb859c34b898c03d82147bec93f as 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/543acdb859c34b898c03d82147bec93f 2024-11-20T22:25:48,834 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/543acdb859c34b898c03d82147bec93f, entries=100, sequenceid=503, filesize=9.6 K 2024-11-20T22:25:48,840 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 6f10e22f8d7346b15976f24fa4b38050 in 220ms, sequenceid=503, compaction requested=true 2024-11-20T22:25:48,862 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/17bc300bc24d4e2e8dc6ed5a249d99c1, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6ce23a98c6dc40d9858b6476794d38b7, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/20af2dc8221e40ddb22a284d81f20e4c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6abece6dbcc54d7ea52aae28a9129022, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6aef765046184848935dd61868367f9b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/ecd7560727ed474abbb245670d3e8e4f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/a946dcc5b2d84cfbbe96bc1b9b58a4fb, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/30c131318c14439eb2095682f72441ee, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/8385af1a6970439cba58b3b87b9931a3, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/720a03e28ef648e1bc034fd11fe1b75f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/72f4ad1ad1884396bb4397344779aeeb, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/ca2d1b65414544759a12d80349385fea, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/e194aa39de1d4eeb837a7386faff706f, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/ff8e04397b8c4b5a9d5dd38b1287d37c, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6376d112d727411f92029fe5078f1ff5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/668b37cf8cdf41ce86b96a30de413bd8, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/46cce630536345bcb4833eb6b0210090, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6d61e069e689497c8ce0dc7ea0978009, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/7f43ac5dee324cad8544cb79f2cd857d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/14a15e5c8a384ae09e6f9ceb6a771da3, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/bfe1c16450524718a2ba3387ac84ad90, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/1bf1aef0573d4621ba6728f5d3133d6a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/bdf0292f32b745049006ef1c42403572, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/c65e1169849e41bd813526ae8e7d2e33, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/00f1e27366234fa18388e37dad535171, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/2ad0221cab114c98979d2dfca61dbca6, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/1ecc00a4486242988ab6320dcf6683f6, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/a98ce1e0168240078f5151b40fcb6a1b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/5b3a5b93bd0041fe8efb634c34655f69, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/4f318e0226264e1393defb66a9456295, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/64503a1eb8ed4ee68b8b12535378eaaf, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/10b07cd84dc045f581567f7a2a737635, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/b6ec1fd560214a5b94cd2aaa8813255f] to archive 2024-11-20T22:25:48,866 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T22:25:48,872 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/17bc300bc24d4e2e8dc6ed5a249d99c1 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/17bc300bc24d4e2e8dc6ed5a249d99c1 2024-11-20T22:25:48,873 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6ce23a98c6dc40d9858b6476794d38b7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6ce23a98c6dc40d9858b6476794d38b7 2024-11-20T22:25:48,875 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/20af2dc8221e40ddb22a284d81f20e4c to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/20af2dc8221e40ddb22a284d81f20e4c 2024-11-20T22:25:48,877 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6abece6dbcc54d7ea52aae28a9129022 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6abece6dbcc54d7ea52aae28a9129022 2024-11-20T22:25:48,879 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6aef765046184848935dd61868367f9b to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6aef765046184848935dd61868367f9b 2024-11-20T22:25:48,881 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/ecd7560727ed474abbb245670d3e8e4f to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/ecd7560727ed474abbb245670d3e8e4f 2024-11-20T22:25:48,893 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/a946dcc5b2d84cfbbe96bc1b9b58a4fb to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/a946dcc5b2d84cfbbe96bc1b9b58a4fb 2024-11-20T22:25:48,894 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/30c131318c14439eb2095682f72441ee to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/30c131318c14439eb2095682f72441ee 2024-11-20T22:25:48,897 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/8385af1a6970439cba58b3b87b9931a3 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/8385af1a6970439cba58b3b87b9931a3 2024-11-20T22:25:48,900 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/720a03e28ef648e1bc034fd11fe1b75f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/720a03e28ef648e1bc034fd11fe1b75f 2024-11-20T22:25:48,902 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/72f4ad1ad1884396bb4397344779aeeb to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/72f4ad1ad1884396bb4397344779aeeb 2024-11-20T22:25:48,905 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/ca2d1b65414544759a12d80349385fea to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/ca2d1b65414544759a12d80349385fea 2024-11-20T22:25:48,912 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/e194aa39de1d4eeb837a7386faff706f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/e194aa39de1d4eeb837a7386faff706f 2024-11-20T22:25:48,914 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/ff8e04397b8c4b5a9d5dd38b1287d37c to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/ff8e04397b8c4b5a9d5dd38b1287d37c 2024-11-20T22:25:48,915 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6376d112d727411f92029fe5078f1ff5 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6376d112d727411f92029fe5078f1ff5 2024-11-20T22:25:48,917 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/668b37cf8cdf41ce86b96a30de413bd8 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/668b37cf8cdf41ce86b96a30de413bd8 2024-11-20T22:25:48,920 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/46cce630536345bcb4833eb6b0210090 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/46cce630536345bcb4833eb6b0210090 2024-11-20T22:25:48,921 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6d61e069e689497c8ce0dc7ea0978009 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/6d61e069e689497c8ce0dc7ea0978009 2024-11-20T22:25:48,922 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/7f43ac5dee324cad8544cb79f2cd857d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/7f43ac5dee324cad8544cb79f2cd857d 2024-11-20T22:25:48,926 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/14a15e5c8a384ae09e6f9ceb6a771da3 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/14a15e5c8a384ae09e6f9ceb6a771da3 2024-11-20T22:25:48,927 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/bfe1c16450524718a2ba3387ac84ad90 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/bfe1c16450524718a2ba3387ac84ad90 2024-11-20T22:25:48,928 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/1bf1aef0573d4621ba6728f5d3133d6a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/1bf1aef0573d4621ba6728f5d3133d6a 2024-11-20T22:25:48,930 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/bdf0292f32b745049006ef1c42403572 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/bdf0292f32b745049006ef1c42403572 2024-11-20T22:25:48,935 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/c65e1169849e41bd813526ae8e7d2e33 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/c65e1169849e41bd813526ae8e7d2e33 2024-11-20T22:25:48,936 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/00f1e27366234fa18388e37dad535171 to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/00f1e27366234fa18388e37dad535171 2024-11-20T22:25:48,937 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/2ad0221cab114c98979d2dfca61dbca6 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/2ad0221cab114c98979d2dfca61dbca6 2024-11-20T22:25:48,939 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/1ecc00a4486242988ab6320dcf6683f6 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/1ecc00a4486242988ab6320dcf6683f6 2024-11-20T22:25:48,947 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/a98ce1e0168240078f5151b40fcb6a1b to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/a98ce1e0168240078f5151b40fcb6a1b 2024-11-20T22:25:48,952 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/5b3a5b93bd0041fe8efb634c34655f69 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/5b3a5b93bd0041fe8efb634c34655f69 2024-11-20T22:25:48,955 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/4f318e0226264e1393defb66a9456295 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/4f318e0226264e1393defb66a9456295 2024-11-20T22:25:48,958 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/64503a1eb8ed4ee68b8b12535378eaaf to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/64503a1eb8ed4ee68b8b12535378eaaf 2024-11-20T22:25:48,962 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/10b07cd84dc045f581567f7a2a737635 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/10b07cd84dc045f581567f7a2a737635 2024-11-20T22:25:48,971 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/b6ec1fd560214a5b94cd2aaa8813255f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/b6ec1fd560214a5b94cd2aaa8813255f 2024-11-20T22:25:48,980 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/1d9d2645985144e088f9af84c4466357, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/5ebdf9c9d7f64f0184af529b5bb1aff8, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/27a1acff636149df902fc703c6740a0a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/68a2fe6c15974c1dbdd92fce72207d42, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/5f5347cd339540ad8d8de3ba20b66985, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/89622cdcbd554d9ea719f9880a913082, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/c8eb15a5e20a479693c37c68a9bd86e5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/a880c0a993754332b3835be1cd80c709, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/b1295d2e9577427a9d392c395ba52e4a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/72f815ac791b4771b1226bb9805703ed, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/476500c13c4b4e73acde6c4410a2980f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/86a5a1e434c84e5cb07dc19ea3239704, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/cb789950f7a94c689b6ab05af0d62149, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/ac61ad1f43bb4306b36f8a63702081ef, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/ec2136fb57fc4ce99f89621b45159164, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/3eb67c122a55445b8774af2ed26a978b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/37996cfa94fa42799bca2e71e28c6058, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/8b657c38599e4fc8a5f9ce2d57dea0f8, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/dcb82d8c320d4c4aa8000e1094c7e700, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/47e785e703784534ada512067c39f6bb, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/baf5f7850bef4fb3af519d2d81d83d74, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/1ac9e422845b44a5b5f5a860b9787785, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/3f7d7ed385f74190a56fa9d846274b79, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/47ff964aa70c42559f9e06e7d376a6fb, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/c27a7c54395943fa9b66bd70b92dd0d5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/0cceadd306d844caaa48d81ff580f431, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/d23211cfc21147ee9b1526bb3b9720e5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/f9bfb1f46eb242d5927e675cf190fc65, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/ca2cae3b4564422692d464b4398b4c31, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/a91d6f112c1540d7bfcf1026389d4161, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/6d931755b819488bbd019176debb3fcc, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/08baf3ea002c49ad975ca8c03e06d81d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/37f9cf58c1be457f9392fa4db9360a20, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/36e1b54baad541b2abddcc66a04e9ada] to archive 2024-11-20T22:25:48,981 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T22:25:48,985 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/1d9d2645985144e088f9af84c4466357 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/1d9d2645985144e088f9af84c4466357 2024-11-20T22:25:48,988 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/5ebdf9c9d7f64f0184af529b5bb1aff8 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/5ebdf9c9d7f64f0184af529b5bb1aff8 2024-11-20T22:25:48,991 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/27a1acff636149df902fc703c6740a0a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/27a1acff636149df902fc703c6740a0a 2024-11-20T22:25:48,995 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/68a2fe6c15974c1dbdd92fce72207d42 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/68a2fe6c15974c1dbdd92fce72207d42 2024-11-20T22:25:49,003 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/5f5347cd339540ad8d8de3ba20b66985 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/5f5347cd339540ad8d8de3ba20b66985 2024-11-20T22:25:49,006 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/89622cdcbd554d9ea719f9880a913082 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/89622cdcbd554d9ea719f9880a913082 2024-11-20T22:25:49,010 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/c8eb15a5e20a479693c37c68a9bd86e5 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/c8eb15a5e20a479693c37c68a9bd86e5 2024-11-20T22:25:49,016 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/a880c0a993754332b3835be1cd80c709 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/a880c0a993754332b3835be1cd80c709 2024-11-20T22:25:49,017 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/b1295d2e9577427a9d392c395ba52e4a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/b1295d2e9577427a9d392c395ba52e4a 2024-11-20T22:25:49,018 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/72f815ac791b4771b1226bb9805703ed to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/72f815ac791b4771b1226bb9805703ed 2024-11-20T22:25:49,020 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/476500c13c4b4e73acde6c4410a2980f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/476500c13c4b4e73acde6c4410a2980f 2024-11-20T22:25:49,022 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/86a5a1e434c84e5cb07dc19ea3239704 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/86a5a1e434c84e5cb07dc19ea3239704 2024-11-20T22:25:49,025 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/cb789950f7a94c689b6ab05af0d62149 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/cb789950f7a94c689b6ab05af0d62149 2024-11-20T22:25:49,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T22:25:49,027 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/ac61ad1f43bb4306b36f8a63702081ef to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/ac61ad1f43bb4306b36f8a63702081ef 2024-11-20T22:25:49,028 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/ec2136fb57fc4ce99f89621b45159164 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/ec2136fb57fc4ce99f89621b45159164 2024-11-20T22:25:49,029 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/3eb67c122a55445b8774af2ed26a978b to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/3eb67c122a55445b8774af2ed26a978b 2024-11-20T22:25:49,032 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/37996cfa94fa42799bca2e71e28c6058 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/37996cfa94fa42799bca2e71e28c6058 2024-11-20T22:25:49,036 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/8b657c38599e4fc8a5f9ce2d57dea0f8 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/8b657c38599e4fc8a5f9ce2d57dea0f8 2024-11-20T22:25:49,037 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/dcb82d8c320d4c4aa8000e1094c7e700 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/dcb82d8c320d4c4aa8000e1094c7e700 2024-11-20T22:25:49,038 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/47e785e703784534ada512067c39f6bb to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/47e785e703784534ada512067c39f6bb 2024-11-20T22:25:49,040 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/baf5f7850bef4fb3af519d2d81d83d74 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/baf5f7850bef4fb3af519d2d81d83d74 2024-11-20T22:25:49,043 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/1ac9e422845b44a5b5f5a860b9787785 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/1ac9e422845b44a5b5f5a860b9787785 2024-11-20T22:25:49,046 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/3f7d7ed385f74190a56fa9d846274b79 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/3f7d7ed385f74190a56fa9d846274b79 2024-11-20T22:25:49,048 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/47ff964aa70c42559f9e06e7d376a6fb to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/47ff964aa70c42559f9e06e7d376a6fb 2024-11-20T22:25:49,055 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/c27a7c54395943fa9b66bd70b92dd0d5 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/c27a7c54395943fa9b66bd70b92dd0d5 2024-11-20T22:25:49,057 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/0cceadd306d844caaa48d81ff580f431 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/0cceadd306d844caaa48d81ff580f431 2024-11-20T22:25:49,060 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/d23211cfc21147ee9b1526bb3b9720e5 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/d23211cfc21147ee9b1526bb3b9720e5 2024-11-20T22:25:49,072 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/f9bfb1f46eb242d5927e675cf190fc65 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/f9bfb1f46eb242d5927e675cf190fc65 2024-11-20T22:25:49,075 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/ca2cae3b4564422692d464b4398b4c31 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/ca2cae3b4564422692d464b4398b4c31 2024-11-20T22:25:49,085 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/a91d6f112c1540d7bfcf1026389d4161 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/a91d6f112c1540d7bfcf1026389d4161 2024-11-20T22:25:49,089 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/6d931755b819488bbd019176debb3fcc to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/6d931755b819488bbd019176debb3fcc 2024-11-20T22:25:49,090 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/08baf3ea002c49ad975ca8c03e06d81d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/08baf3ea002c49ad975ca8c03e06d81d 2024-11-20T22:25:49,092 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/37f9cf58c1be457f9392fa4db9360a20 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/37f9cf58c1be457f9392fa4db9360a20 2024-11-20T22:25:49,093 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/36e1b54baad541b2abddcc66a04e9ada to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/36e1b54baad541b2abddcc66a04e9ada 2024-11-20T22:25:49,101 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/de49e1443bc24a6c967ef314478faf0a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/9c8c9887614c4699b7b959ca88da0a95, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/bb1909c36c2344b49eddd48a8f5dc609, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/2e9047ab207f4810bc440cbab76f7c10, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0df9ed5d562d446db72eb7d3a578100f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/8b6b7780637c4bdda5486f711f40a53d, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/a72fcd91c61f4559a59c5534ae03c6dc, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/b6861bc5332a49a4a36accdda92149aa, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/fa052f34817e476cb10c3390270377f7, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/24a53feed4de4081aa851ba2b36fd10f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/4d5e209cad1246259dda5ec5322b41ce, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0fbda6d3f6c149a8944c423860de4cc8, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/d4d25fb7022f428194cf794934e503ba, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/37fbdb7af9d645fca4be0a0deea235fb, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/595d8c03a3aa4787960185f0f629701a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/d6bb2eeaa5ec496888df661296101b41, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/a9574a5dd95946ed8b10eab78eac351b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/271d5d3651cc49368b039c1761d8034d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/5b6a0e211fde43cbb9e5dc9b233f8435, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/43709d321f1f49b69faf1b1993b37517, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/bf23b929857343a5a9236b6d60e7facf, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/8bcbb471ca7441d699c63a0ff01ac331, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/139e1b645f1f41b69f863990ef31c40f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/6a60e1c3f80f4a5caa548c2c6e5ea94d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/c770860fc8e74dfaaaa51edb91a3bdb3, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/8974c75f93a54e03a6bdc14ef98fb326, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/cc12c05b6591469b968377c1d99d07f0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/aef9b4cf1a69470fbff2ae6ad1e5a13f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/870bb9f4d67549aab859562ee98be0da, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/20fec8933e19494faa08e65ceb595d76, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/b6d747aa7c7c40dd80a926b33e4d4697, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/160f2e7555974b1abd6749908fb7f9a0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/7879871e9f4e42ba93f6f3af60019161, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0c89314911014282be4071478bec93c2, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/17d240580875482d8eb117cde66e8109, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0defeed45d1f4553a9da3aedd5d56820] to archive 2024-11-20T22:25:49,103 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
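The HFileArchiver(596) entries above show each compacted store file being moved from the region's family directory under `data/` to the mirror location under `archive/`, with the namespace/table/region/family layout preserved. A minimal sketch of that path mapping, using plain Java string handling (illustrative only, not the HFileArchiver implementation; the root directory constant is copied from the log for the example):

```java
// Sketch: derive the archive location that mirrors a store file path, as seen
// in the HFileArchiver log entries above. Not the HBase implementation.
public final class ArchivePathSketch {

    // Root dir as it appears in this test run's log, used only for illustration.
    static final String ROOT =
        "hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a";

    /** Maps ROOT/data/&lt;ns&gt;/&lt;table&gt;/&lt;region&gt;/&lt;cf&gt;/&lt;file&gt; to ROOT/archive/data/... */
    static String toArchivePath(String storeFilePath) {
        String dataPrefix = ROOT + "/data/";
        if (!storeFilePath.startsWith(dataPrefix)) {
            throw new IllegalArgumentException("not under " + dataPrefix + ": " + storeFilePath);
        }
        return ROOT + "/archive/data/" + storeFilePath.substring(dataPrefix.length());
    }

    public static void main(String[] args) {
        String src = ROOT + "/data/default/TestAcidGuarantees/"
            + "6f10e22f8d7346b15976f24fa4b38050/B/89622cdcbd554d9ea719f9880a913082";
        // Prints the same archive path the first log entry above reports as the destination.
        System.out.println(toArchivePath(src));
    }
}
```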
2024-11-20T22:25:49,104 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/de49e1443bc24a6c967ef314478faf0a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/de49e1443bc24a6c967ef314478faf0a 2024-11-20T22:25:49,107 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/9c8c9887614c4699b7b959ca88da0a95 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/9c8c9887614c4699b7b959ca88da0a95 2024-11-20T22:25:49,111 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/bb1909c36c2344b49eddd48a8f5dc609 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/bb1909c36c2344b49eddd48a8f5dc609 2024-11-20T22:25:49,112 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/2e9047ab207f4810bc440cbab76f7c10 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/2e9047ab207f4810bc440cbab76f7c10 2024-11-20T22:25:49,114 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0df9ed5d562d446db72eb7d3a578100f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0df9ed5d562d446db72eb7d3a578100f 2024-11-20T22:25:49,116 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/8b6b7780637c4bdda5486f711f40a53d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/8b6b7780637c4bdda5486f711f40a53d 2024-11-20T22:25:49,117 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/a72fcd91c61f4559a59c5534ae03c6dc to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/a72fcd91c61f4559a59c5534ae03c6dc 2024-11-20T22:25:49,118 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/b6861bc5332a49a4a36accdda92149aa to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/b6861bc5332a49a4a36accdda92149aa 2024-11-20T22:25:49,120 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/fa052f34817e476cb10c3390270377f7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/fa052f34817e476cb10c3390270377f7 2024-11-20T22:25:49,120 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/24a53feed4de4081aa851ba2b36fd10f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/24a53feed4de4081aa851ba2b36fd10f 2024-11-20T22:25:49,121 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/4d5e209cad1246259dda5ec5322b41ce to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/4d5e209cad1246259dda5ec5322b41ce 2024-11-20T22:25:49,122 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0fbda6d3f6c149a8944c423860de4cc8 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0fbda6d3f6c149a8944c423860de4cc8 2024-11-20T22:25:49,125 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/d4d25fb7022f428194cf794934e503ba to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/d4d25fb7022f428194cf794934e503ba 2024-11-20T22:25:49,127 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/37fbdb7af9d645fca4be0a0deea235fb to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/37fbdb7af9d645fca4be0a0deea235fb 2024-11-20T22:25:49,129 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/595d8c03a3aa4787960185f0f629701a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/595d8c03a3aa4787960185f0f629701a 2024-11-20T22:25:49,131 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/d6bb2eeaa5ec496888df661296101b41 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/d6bb2eeaa5ec496888df661296101b41 2024-11-20T22:25:49,132 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/a9574a5dd95946ed8b10eab78eac351b to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/a9574a5dd95946ed8b10eab78eac351b 2024-11-20T22:25:49,133 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/271d5d3651cc49368b039c1761d8034d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/271d5d3651cc49368b039c1761d8034d 2024-11-20T22:25:49,135 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/5b6a0e211fde43cbb9e5dc9b233f8435 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/5b6a0e211fde43cbb9e5dc9b233f8435 2024-11-20T22:25:49,141 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/43709d321f1f49b69faf1b1993b37517 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/43709d321f1f49b69faf1b1993b37517 2024-11-20T22:25:49,144 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/bf23b929857343a5a9236b6d60e7facf to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/bf23b929857343a5a9236b6d60e7facf 2024-11-20T22:25:49,153 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/8bcbb471ca7441d699c63a0ff01ac331 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/8bcbb471ca7441d699c63a0ff01ac331 2024-11-20T22:25:49,156 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/139e1b645f1f41b69f863990ef31c40f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/139e1b645f1f41b69f863990ef31c40f 2024-11-20T22:25:49,161 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/6a60e1c3f80f4a5caa548c2c6e5ea94d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/6a60e1c3f80f4a5caa548c2c6e5ea94d 2024-11-20T22:25:49,162 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/c770860fc8e74dfaaaa51edb91a3bdb3 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/c770860fc8e74dfaaaa51edb91a3bdb3 2024-11-20T22:25:49,165 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/8974c75f93a54e03a6bdc14ef98fb326 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/8974c75f93a54e03a6bdc14ef98fb326 2024-11-20T22:25:49,179 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/cc12c05b6591469b968377c1d99d07f0 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/cc12c05b6591469b968377c1d99d07f0 2024-11-20T22:25:49,185 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/aef9b4cf1a69470fbff2ae6ad1e5a13f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/aef9b4cf1a69470fbff2ae6ad1e5a13f 2024-11-20T22:25:49,190 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/870bb9f4d67549aab859562ee98be0da to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/870bb9f4d67549aab859562ee98be0da 2024-11-20T22:25:49,192 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/20fec8933e19494faa08e65ceb595d76 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/20fec8933e19494faa08e65ceb595d76 2024-11-20T22:25:49,195 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/b6d747aa7c7c40dd80a926b33e4d4697 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/b6d747aa7c7c40dd80a926b33e4d4697 2024-11-20T22:25:49,197 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/160f2e7555974b1abd6749908fb7f9a0 to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/160f2e7555974b1abd6749908fb7f9a0 2024-11-20T22:25:49,198 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/7879871e9f4e42ba93f6f3af60019161 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/7879871e9f4e42ba93f6f3af60019161 2024-11-20T22:25:49,201 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0c89314911014282be4071478bec93c2 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0c89314911014282be4071478bec93c2 2024-11-20T22:25:49,204 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/17d240580875482d8eb117cde66e8109 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/17d240580875482d8eb117cde66e8109 2024-11-20T22:25:49,206 DEBUG [StoreCloser-TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0defeed45d1f4553a9da3aedd5d56820 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/0defeed45d1f4553a9da3aedd5d56820 2024-11-20T22:25:49,226 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/recovered.edits/506.seqid, newMaxSeqId=506, maxSeqId=1 2024-11-20T22:25:49,228 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050. 
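Once the store files are archived, the close path writes a `recovered.edits/<seqid>.seqid` marker (here `506.seqid`, newMaxSeqId=506, maxSeqId=1) so a later open knows up to which sequence id edits are already persisted, and the region is then closed. A hedged sketch of reading the highest sequence id back out of such marker file names; the naming convention is taken from the WALSplitUtil entry above, and this is illustrative parsing, not the HBase code:

```java
import java.util.List;

// Sketch: recover the highest flushed sequence id from recovered.edits marker
// file names like "506.seqid". Illustrative only.
public final class SeqIdMarkerSketch {

    static long maxSeqIdFromMarkers(List<String> recoveredEditsFileNames) {
        long max = -1L;
        for (String name : recoveredEditsFileNames) {
            if (name.endsWith(".seqid")) {
                String digits = name.substring(0, name.length() - ".seqid".length());
                try {
                    max = Math.max(max, Long.parseLong(digits));
                } catch (NumberFormatException ignored) {
                    // skip files that do not follow the <seqid>.seqid pattern
                }
            }
        }
        return max;
    }

    public static void main(String[] args) {
        System.out.println(maxSeqIdFromMarkers(List.of("506.seqid"))); // 506
    }
}
```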
2024-11-20T22:25:49,228 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1635): Region close journal for 6f10e22f8d7346b15976f24fa4b38050: 2024-11-20T22:25:49,239 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(170): Closed 6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:49,247 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=6f10e22f8d7346b15976f24fa4b38050, regionState=CLOSED 2024-11-20T22:25:49,256 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-20T22:25:49,256 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; CloseRegionProcedure 6f10e22f8d7346b15976f24fa4b38050, server=6365a1e51efd,44631,1732141399950 in 788 msec 2024-11-20T22:25:49,258 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=145 2024-11-20T22:25:49,258 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=145, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6f10e22f8d7346b15976f24fa4b38050, UNASSIGN in 792 msec 2024-11-20T22:25:49,259 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-20T22:25:49,259 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 796 msec 2024-11-20T22:25:49,260 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141549260"}]},"ts":"1732141549260"} 2024-11-20T22:25:49,261 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T22:25:49,299 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T22:25:49,307 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 884 msec 2024-11-20T22:25:49,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T22:25:49,531 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-11-20T22:25:49,531 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T22:25:49,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:49,534 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=148, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:49,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-20T22:25:49,534 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=148, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:49,536 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:49,538 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A, FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B, FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C, FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/recovered.edits] 2024-11-20T22:25:49,541 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/2d910dfa034646a193ba350752555ddc to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/2d910dfa034646a193ba350752555ddc 2024-11-20T22:25:49,544 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/e522fe8761634db490a847997085e9eb to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/A/e522fe8761634db490a847997085e9eb 2024-11-20T22:25:49,549 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/96c2226c8b87497fa7d935e25f183160 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/96c2226c8b87497fa7d935e25f183160 2024-11-20T22:25:49,553 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/c994013816ae4082b41583b514e1fc6f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/c994013816ae4082b41583b514e1fc6f 2024-11-20T22:25:49,553 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/e4c8626c0fcd42e08d34f52928cf4f24 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/B/e4c8626c0fcd42e08d34f52928cf4f24 
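The entries above are the tail of the disable and the start of the delete: the master finishes DisableTableProcedure (pid=144), the client sees the DISABLE operation complete, and the stored DeleteTableProcedure (pid=148) then archives the whole region directory (families A, B, C and recovered.edits) before touching hbase:meta. From the client side the same flow is just two Admin calls; a minimal sketch using the standard HBase client API (connection setup assumed):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of the client-side disable/delete that drives the master procedures
// (pid=144 DisableTableProcedure, pid=148 DeleteTableProcedure) seen in the log.
public final class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            if (admin.tableExists(tn)) {
                if (admin.isTableEnabled(tn)) {
                    admin.disableTable(tn); // blocks until the disable procedure completes
                }
                admin.deleteTable(tn);      // blocks until the delete procedure completes
            }
        }
    }
}
```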
2024-11-20T22:25:49,555 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/17f48c4301024c51ab98ba2579feab32 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/17f48c4301024c51ab98ba2579feab32 2024-11-20T22:25:49,556 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/543acdb859c34b898c03d82147bec93f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/C/543acdb859c34b898c03d82147bec93f 2024-11-20T22:25:49,559 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/recovered.edits/506.seqid to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050/recovered.edits/506.seqid 2024-11-20T22:25:49,559 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/6f10e22f8d7346b15976f24fa4b38050 2024-11-20T22:25:49,559 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T22:25:49,563 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=148, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:49,576 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T22:25:49,585 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T22:25:49,591 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=148, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:49,591 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
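With the filesystem layout archived and the region directory deleted, the procedure turns to hbase:meta cleanup: the region row and the table state row are removed (the `"ts":"9223372036854775807"` on the Delete entries that follow is Long.MAX_VALUE, i.e. all versions). A small hedged sketch of a client-side check that the region rows are really gone, using an ordinary prefix scan of hbase:meta; this is an observer only, not part of the procedure:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: list hbase:meta rows whose keys start with "TestAcidGuarantees," --
// after the DeleteTableProcedure finishes, this scan should print nothing.
public final class MetaRowsSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
            Scan scan = new Scan().setRowPrefixFilter(Bytes.toBytes("TestAcidGuarantees,"));
            try (ResultScanner scanner = meta.getScanner(scan)) {
                for (Result r : scanner) {
                    System.out.println(Bytes.toStringBinary(r.getRow()));
                }
            }
        }
    }
}
```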
2024-11-20T22:25:49,592 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732141549591"}]},"ts":"9223372036854775807"} 2024-11-20T22:25:49,612 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T22:25:49,613 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T22:25:49,613 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 6f10e22f8d7346b15976f24fa4b38050, NAME => 'TestAcidGuarantees,,1732141518143.6f10e22f8d7346b15976f24fa4b38050.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T22:25:49,613 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T22:25:49,613 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732141549613"}]},"ts":"9223372036854775807"} 2024-11-20T22:25:49,615 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T22:25:49,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-20T22:25:49,675 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=148, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:49,676 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 144 msec 2024-11-20T22:25:49,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-20T22:25:49,836 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-11-20T22:25:49,849 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=237 (was 240), OpenFileDescriptor=451 (was 461), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1077 (was 1037) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1552 (was 2546) 2024-11-20T22:25:49,860 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=237, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=1077, ProcessCount=11, AvailableMemoryMB=1551 2024-11-20T22:25:49,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
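The TableDescriptorChecker warning at the end of this block fires because the next test sets a per-table memstore flush size of 131072 bytes (128 KB), well below the 128 MB default of `hbase.hregion.memstore.flush.size`; in this run the checker only warns and the create proceeds. A hedged sketch of how such a descriptor-level flush size is set from client code (illustrative, not the test's own setup; the value matches the 131072 in the warning):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

// Sketch: a table descriptor with a deliberately tiny per-table memstore flush
// size (128 KB), the setting that triggers the TableDescriptorChecker warning above.
public final class SmallFlushSizeDescriptorSketch {
    public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setMemStoreFlushSize(128 * 1024)   // 131072 bytes, vs. the 128 MB default
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
            .build();
    }
}
```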
2024-11-20T22:25:49,862 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T22:25:49,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=149, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:49,864 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T22:25:49,865 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:49,865 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 149 2024-11-20T22:25:49,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-20T22:25:49,865 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T22:25:49,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742394_1570 (size=960) 2024-11-20T22:25:49,915 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a 2024-11-20T22:25:49,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742395_1571 (size=53) 2024-11-20T22:25:49,943 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:25:49,943 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing bacf880704606b04149d74a04b9ceb0e, disabling compactions & flushes 2024-11-20T22:25:49,943 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:49,943 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:49,943 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. after waiting 0 ms 2024-11-20T22:25:49,943 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:49,943 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:49,943 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:25:49,947 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T22:25:49,947 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732141549947"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732141549947"}]},"ts":"1732141549947"} 2024-11-20T22:25:49,952 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
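The create request above carries the table attribute `hbase.hregion.compacting.memstore.type => BASIC`, which is why each store later opens with a CompactingMemStore using the BASIC in-memory compaction policy. A minimal client-side sketch of building and creating such a descriptor with three single-version families A, B and C (the Admin/connection plumbing mirrors the earlier sketch and is assumed; this is illustrative, not the test's own setup code):

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: a table descriptor matching the create request in the log -- families
// A, B, C with max 1 version, and BASIC in-memory compaction as a table attribute.
public final class CreateTestTableSketch {
    public static void create(Admin admin) throws IOException {
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
        for (String family : new String[] {"A", "B", "C"}) {
            builder.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)
                    .build());
        }
        TableDescriptor td = builder.build();
        admin.createTable(td); // drives a CreateTableProcedure like pid=149 in the log
    }
}
```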
2024-11-20T22:25:49,952 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T22:25:49,953 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141549952"}]},"ts":"1732141549952"} 2024-11-20T22:25:49,954 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T22:25:49,966 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bacf880704606b04149d74a04b9ceb0e, ASSIGN}] 2024-11-20T22:25:49,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-20T22:25:49,967 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bacf880704606b04149d74a04b9ceb0e, ASSIGN 2024-11-20T22:25:49,969 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=bacf880704606b04149d74a04b9ceb0e, ASSIGN; state=OFFLINE, location=6365a1e51efd,44631,1732141399950; forceNewPlan=false, retain=false 2024-11-20T22:25:50,119 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=bacf880704606b04149d74a04b9ceb0e, regionState=OPENING, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:50,127 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; OpenRegionProcedure bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950}] 2024-11-20T22:25:50,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-20T22:25:50,279 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:50,281 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
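Assignment then proceeds as nested procedures: the ASSIGN TransitRegionStateProcedure (pid=150) picks a server, writes regionState=OPENING to hbase:meta, and dispatches an OpenRegionProcedure (pid=151) to that region server, which opens the region. A client can observe the resulting placement through the RegionLocator API; a small hedged sketch (connection assumed as in the earlier sketches):

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

// Sketch: print where each region of the table ended up after the ASSIGN
// procedures complete. Purely an observer; it does not drive assignment.
public final class WhereIsMyRegionSketch {
    public static void printLocations(Connection conn) throws IOException {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (RegionLocator locator = conn.getRegionLocator(tn)) {
            for (HRegionLocation loc : locator.getAllRegionLocations()) {
                System.out.println(loc.getRegion().getEncodedName()
                    + " -> " + loc.getServerName());
            }
        }
    }
}
```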
2024-11-20T22:25:50,281 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7285): Opening region: {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:25:50,281 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:50,281 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:25:50,282 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7327): checking encryption for bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:50,282 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7330): checking classloading for bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:50,283 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:50,283 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:25:50,284 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bacf880704606b04149d74a04b9ceb0e columnFamilyName A 2024-11-20T22:25:50,284 DEBUG [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:50,284 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] regionserver.HStore(327): Store=bacf880704606b04149d74a04b9ceb0e/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:25:50,284 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:50,285 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:25:50,285 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bacf880704606b04149d74a04b9ceb0e columnFamilyName B 2024-11-20T22:25:50,285 DEBUG [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:50,286 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] regionserver.HStore(327): Store=bacf880704606b04149d74a04b9ceb0e/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:25:50,286 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:50,287 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:25:50,287 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bacf880704606b04149d74a04b9ceb0e columnFamilyName C 2024-11-20T22:25:50,287 DEBUG [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:50,287 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] regionserver.HStore(327): Store=bacf880704606b04149d74a04b9ceb0e/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:25:50,287 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:50,288 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:50,288 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:50,290 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T22:25:50,291 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1085): writing seq id for bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:50,293 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T22:25:50,294 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1102): Opened bacf880704606b04149d74a04b9ceb0e; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62385320, jitterRate=-0.07038629055023193}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T22:25:50,295 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1001): Region open journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:25:50,295 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., pid=151, masterSystemTime=1732141550279 2024-11-20T22:25:50,296 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:50,296 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
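[Editor's note, not part of the log] The "CompactingMemStore ... compactor=BASIC" store-open messages above follow from the table attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC' carried in the logged descriptor. A hedged sketch of two ways that policy can be requested through the HBase 2.x descriptor builders; the class and method names below are illustrative, not taken from the test code:

```java
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class BasicInMemoryCompactionExample {
  // Table-wide attribute, spelled exactly as it appears in the logged descriptor.
  static TableDescriptor viaTableAttribute() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setValue("hbase.hregion.compacting.memstore.type", "BASIC")
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
        .build();
  }

  // Per-family typed setter requesting the same BASIC in-memory compaction policy.
  static TableDescriptor viaFamilySetter() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
            .build())
        .build();
  }
}
```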
2024-11-20T22:25:50,297 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=bacf880704606b04149d74a04b9ceb0e, regionState=OPEN, openSeqNum=2, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:50,298 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-11-20T22:25:50,298 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; OpenRegionProcedure bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 in 170 msec 2024-11-20T22:25:50,299 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-11-20T22:25:50,299 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=bacf880704606b04149d74a04b9ceb0e, ASSIGN in 332 msec 2024-11-20T22:25:50,300 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T22:25:50,300 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141550300"}]},"ts":"1732141550300"} 2024-11-20T22:25:50,301 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T22:25:50,308 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T22:25:50,310 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 446 msec 2024-11-20T22:25:50,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-20T22:25:50,468 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 149 completed 2024-11-20T22:25:50,470 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x151bac0d to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a1fe6e4 2024-11-20T22:25:50,510 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58e7ba75, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:50,523 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:50,524 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52024, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:50,525 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T22:25:50,525 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33116, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T22:25:50,527 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T22:25:50,527 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T22:25:50,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:50,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742396_1572 (size=996) 2024-11-20T22:25:50,953 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-20T22:25:50,954 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-20T22:25:50,955 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T22:25:50,956 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bacf880704606b04149d74a04b9ceb0e, REOPEN/MOVE}] 2024-11-20T22:25:50,957 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bacf880704606b04149d74a04b9ceb0e, REOPEN/MOVE 2024-11-20T22:25:50,957 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=bacf880704606b04149d74a04b9ceb0e, regionState=CLOSING, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:50,958 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T22:25:50,958 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE; CloseRegionProcedure bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950}] 2024-11-20T22:25:51,109 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:51,109 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(124): Close bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:51,109 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T22:25:51,109 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1681): Closing bacf880704606b04149d74a04b9ceb0e, disabling compactions & flushes 2024-11-20T22:25:51,109 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:51,109 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:51,109 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. after waiting 0 ms 2024-11-20T22:25:51,110 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
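[Editor's note, not part of the log] The ModifyTableProcedure stored above as pid=152 rewrites the descriptor so that family 'A' becomes a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4'), which is why the region is closed and reopened in the surrounding records. A sketch of the Admin call that produces such a request; the Connection variable conn is assumed, and the rest of the descriptor is carried over unchanged, as in the logged before/after descriptors:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyA {
  static void enableMob(Connection conn) throws Exception {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    try (Admin admin = conn.getAdmin()) {
      TableDescriptor current = admin.getDescriptor(name);
      ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder
          .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
          .setMobEnabled(true)   // IS_MOB => 'true'
          .setMobThreshold(4L)   // MOB_THRESHOLD => '4': values larger than 4 bytes go to MOB files
          .build();
      // Produces a ModifyTableProcedure (pid=152 here) followed by a region reopen.
      admin.modifyTable(TableDescriptorBuilder.newBuilder(current)
          .modifyColumnFamily(mobA)
          .build());
    }
  }
}
```

The reopen is visible in the log itself: the region comes back with openSeqNum=5 after the modify, versus openSeqNum=2 from the initial assignment.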
2024-11-20T22:25:51,112 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T22:25:51,113 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:51,113 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1635): Region close journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:25:51,113 WARN [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegionServer(3786): Not adding moved region record: bacf880704606b04149d74a04b9ceb0e to self. 2024-11-20T22:25:51,114 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(170): Closed bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:51,114 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=bacf880704606b04149d74a04b9ceb0e, regionState=CLOSED 2024-11-20T22:25:51,116 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-11-20T22:25:51,116 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; CloseRegionProcedure bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 in 157 msec 2024-11-20T22:25:51,116 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=bacf880704606b04149d74a04b9ceb0e, REOPEN/MOVE; state=CLOSED, location=6365a1e51efd,44631,1732141399950; forceNewPlan=false, retain=true 2024-11-20T22:25:51,267 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=bacf880704606b04149d74a04b9ceb0e, regionState=OPENING, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:51,268 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=154, state=RUNNABLE; OpenRegionProcedure bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950}] 2024-11-20T22:25:51,419 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:51,421 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
2024-11-20T22:25:51,421 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(7285): Opening region: {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:25:51,422 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:51,422 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:25:51,422 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(7327): checking encryption for bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:51,422 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(7330): checking classloading for bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:51,423 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:51,424 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:25:51,424 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bacf880704606b04149d74a04b9ceb0e columnFamilyName A 2024-11-20T22:25:51,425 DEBUG [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:51,425 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] regionserver.HStore(327): Store=bacf880704606b04149d74a04b9ceb0e/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:25:51,426 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:51,426 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:25:51,426 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bacf880704606b04149d74a04b9ceb0e columnFamilyName B 2024-11-20T22:25:51,426 DEBUG [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:51,427 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] regionserver.HStore(327): Store=bacf880704606b04149d74a04b9ceb0e/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:25:51,427 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:51,428 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:25:51,428 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bacf880704606b04149d74a04b9ceb0e columnFamilyName C 2024-11-20T22:25:51,428 DEBUG [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:51,428 INFO [StoreOpener-bacf880704606b04149d74a04b9ceb0e-1 {}] regionserver.HStore(327): Store=bacf880704606b04149d74a04b9ceb0e/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:25:51,429 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:51,429 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:51,430 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:51,432 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T22:25:51,434 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1085): writing seq id for bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:51,435 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1102): Opened bacf880704606b04149d74a04b9ceb0e; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70831315, jitterRate=0.0554688423871994}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T22:25:51,436 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1001): Region open journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:25:51,436 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., pid=156, masterSystemTime=1732141551419 2024-11-20T22:25:51,437 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:51,437 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
2024-11-20T22:25:51,438 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=bacf880704606b04149d74a04b9ceb0e, regionState=OPEN, openSeqNum=5, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:51,439 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=154 2024-11-20T22:25:51,439 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=154, state=SUCCESS; OpenRegionProcedure bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 in 170 msec 2024-11-20T22:25:51,440 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-11-20T22:25:51,440 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=bacf880704606b04149d74a04b9ceb0e, REOPEN/MOVE in 483 msec 2024-11-20T22:25:51,441 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-11-20T22:25:51,441 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 485 msec 2024-11-20T22:25:51,443 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 915 msec 2024-11-20T22:25:51,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-20T22:25:51,444 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2089b1f4 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@55544bc7 2024-11-20T22:25:51,483 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3005670a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:51,484 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x65f51785 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1208728f 2024-11-20T22:25:51,491 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@92e7af3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:51,492 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3cc71f2e to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6d0a9e33 2024-11-20T22:25:51,500 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17899883, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:51,501 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79a7bd2b 
to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@40e55f2a 2024-11-20T22:25:51,508 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b739a35, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:51,509 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4d688bcb to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@271e8143 2024-11-20T22:25:51,516 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20bb05a7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:51,517 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62b06a95 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1a5ecd59 2024-11-20T22:25:51,533 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62750e61, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:51,534 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d02ace0 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@61da8c1c 2024-11-20T22:25:51,550 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b968040, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:51,551 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x63054209 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@560a8819 2024-11-20T22:25:51,566 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49019618, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:51,568 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3fbb1399 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3df30e37 2024-11-20T22:25:51,584 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7887fec7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:51,584 DEBUG [Time-limited 
test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51fccca6 to 127.0.0.1:51822 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@745bf218 2024-11-20T22:25:51,600 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@336d4b92, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:51,615 DEBUG [hconnection-0x4d4966ac-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:51,616 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52028, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:51,619 DEBUG [hconnection-0x267aff80-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:51,619 DEBUG [hconnection-0x7e20c0c6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:51,620 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52042, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:51,620 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52040, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:51,623 DEBUG [hconnection-0x76e2646b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:51,624 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52052, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:51,633 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:51,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees 2024-11-20T22:25:51,634 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:51,634 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:51,634 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:51,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T22:25:51,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
regionserver.HRegion(8581): Flush requested on bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:51,652 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bacf880704606b04149d74a04b9ceb0e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:25:51,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=A 2024-11-20T22:25:51,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:51,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=B 2024-11-20T22:25:51,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:51,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=C 2024-11-20T22:25:51,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:51,674 DEBUG [hconnection-0x40946f0b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:51,676 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52056, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:51,678 DEBUG [hconnection-0x12dc2f25-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:51,679 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52068, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:51,686 DEBUG [hconnection-0x4cd06f2a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:51,688 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52082, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:51,698 DEBUG [hconnection-0x619d0016-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:51,699 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52094, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:51,714 DEBUG [hconnection-0x7f457114-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:51,715 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52096, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:51,728 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141611724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:51,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52068 deadline: 1732141611724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:51,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52094 deadline: 1732141611726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:51,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141611726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:51,730 DEBUG [hconnection-0x26d3cf66-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:51,731 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52108, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:51,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141611728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:51,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T22:25:51,740 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200371963a5a87453ab085f2fc5e0dfda9_bacf880704606b04149d74a04b9ceb0e is 50, key is test_row_0/A:col10/1732141551648/Put/seqid=0 2024-11-20T22:25:51,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742397_1573 (size=14594) 2024-11-20T22:25:51,772 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:51,778 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200371963a5a87453ab085f2fc5e0dfda9_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200371963a5a87453ab085f2fc5e0dfda9_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:51,783 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/ad81fb331931472b85218348bc432264, store: [table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:25:51,783 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/ad81fb331931472b85218348bc432264 is 175, key is test_row_0/A:col10/1732141551648/Put/seqid=0 2024-11-20T22:25:51,787 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:51,788 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T22:25:51,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:51,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:25:51,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:51,788 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:51,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
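The Mutate calls above are being rejected with RegionTooBusyException because the region's memstore has crossed its 512.0 K blocking limit while a flush is still running. A minimal client-side sketch of backing off and retrying such writes follows; the table name, row, and column family are taken from the log, the retry count and backoff are illustrative assumptions, and the stock HBase client normally retries this exception internally (possibly wrapped), so the explicit loop is only for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      int attempts = 0;
      long backoffMs = 100;   // illustrative starting backoff
      while (true) {
        try {
          table.put(put);     // rejected while the memstore is over the blocking limit
          break;
        } catch (RegionTooBusyException e) {
          if (++attempts > 5) {  // illustrative retry cap
            throw e;
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2;        // back off while the flush drains the memstore
        }
      }
    }
  }
}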
2024-11-20T22:25:51,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:51,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742398_1574 (size=39549) 2024-11-20T22:25:51,811 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/ad81fb331931472b85218348bc432264 2024-11-20T22:25:51,833 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141611830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:51,833 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52068 deadline: 1732141611831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:51,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141611831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:51,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52094 deadline: 1732141611831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:51,836 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141611834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:51,862 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/e415b976194f44d4a4530135aad71ea5 is 50, key is test_row_0/B:col10/1732141551648/Put/seqid=0 2024-11-20T22:25:51,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742399_1575 (size=12001) 2024-11-20T22:25:51,889 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/e415b976194f44d4a4530135aad71ea5 2024-11-20T22:25:51,930 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/f5683d5f9f8a4378997b29ea792f1e0f is 50, key is test_row_0/C:col10/1732141551648/Put/seqid=0 2024-11-20T22:25:51,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T22:25:51,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742400_1576 (size=12001) 2024-11-20T22:25:51,941 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/f5683d5f9f8a4378997b29ea792f1e0f 2024-11-20T22:25:51,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/ad81fb331931472b85218348bc432264 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/ad81fb331931472b85218348bc432264 2024-11-20T22:25:51,947 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:51,947 
DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T22:25:51,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:51,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:25:51,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:51,948 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:51,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
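The pid=158 failures above repeat because the server refuses to start a second flush ("NOT flushing ... as already flushing") while writes keep being turned away by the memstore back-pressure check. A simplified sketch of that gate follows; it is only an illustration of the mechanism, not the actual HRegion.checkResources code, and the class, field, and method names are invented for the example.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical illustration of the memstore back-pressure gate seen in the log.
final class MemStoreGate {
  private final long blockingMemStoreSize;                 // e.g. 512 KB in this run
  private final AtomicLong memStoreSizeBytes = new AtomicLong();

  MemStoreGate(long blockingMemStoreSize) {
    this.blockingMemStoreSize = blockingMemStoreSize;
  }

  /** Called before every mutation; mirrors the "Over memstore limit" rejections above. */
  void checkResources(String regionName, Runnable requestFlush) throws IOException {
    if (memStoreSizeBytes.get() > blockingMemStoreSize) {
      requestFlush.run();                                   // ask for a flush, as in "Flush requested on bacf8807..."
      throw new IOException("Over memstore limit=" + blockingMemStoreSize
          + ", regionName=" + regionName);                  // the real server throws RegionTooBusyException here
    }
  }

  void added(long bytes)   { memStoreSizeBytes.addAndGet(bytes); }
  void flushed(long bytes) { memStoreSizeBytes.addAndGet(-bytes); }
}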
2024-11-20T22:25:51,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
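A blocking threshold like the 512.0 K in these messages is normally derived from the memstore flush size multiplied by the block multiplier (hbase.hregion.memstore.flush.size x hbase.hregion.memstore.block.multiplier). The snippet below shows how a test configuration could arrive at such a small limit; the concrete values are illustrative assumptions, not read from this run's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemStoreLimitConfig {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Flush each memstore once it holds ~128 KB of data (illustrative; the default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block new writes once the memstore grows past flush.size * multiplier,
    // i.e. 128 KB * 4 = 512 KB -- matching the "Over memstore limit=512.0 K" above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}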
2024-11-20T22:25:51,955 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/ad81fb331931472b85218348bc432264, entries=200, sequenceid=15, filesize=38.6 K 2024-11-20T22:25:51,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/e415b976194f44d4a4530135aad71ea5 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/e415b976194f44d4a4530135aad71ea5 2024-11-20T22:25:51,960 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/e415b976194f44d4a4530135aad71ea5, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T22:25:51,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/f5683d5f9f8a4378997b29ea792f1e0f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/f5683d5f9f8a4378997b29ea792f1e0f 2024-11-20T22:25:51,976 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/f5683d5f9f8a4378997b29ea792f1e0f, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T22:25:51,976 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for bacf880704606b04149d74a04b9ceb0e in 324ms, sequenceid=15, compaction requested=false 2024-11-20T22:25:51,977 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:25:52,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:52,036 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bacf880704606b04149d74a04b9ceb0e 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T22:25:52,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=A 2024-11-20T22:25:52,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:52,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=B 2024-11-20T22:25:52,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:52,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=C 2024-11-20T22:25:52,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-11-20T22:25:52,041 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141612039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:52,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141612040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:52,043 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52094 deadline: 1732141612041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:52,043 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141612042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:52,044 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52068 deadline: 1732141612042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:52,061 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201c7b6a6d2faf443daac6e48fcae9ccf1_bacf880704606b04149d74a04b9ceb0e is 50, key is test_row_0/A:col10/1732141552036/Put/seqid=0 2024-11-20T22:25:52,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742401_1577 (size=12154) 2024-11-20T22:25:52,096 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,102 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:52,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T22:25:52,102 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201c7b6a6d2faf443daac6e48fcae9ccf1_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201c7b6a6d2faf443daac6e48fcae9ccf1_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:52,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:52,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:25:52,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
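While that flush is in flight, the master keeps re-dispatching FlushRegionCallable (pid=158) and polling pid=157, and the region server keeps answering that it is already flushing. From a test or client, an explicit flush of the whole table is typically requested through the Admin API; a minimal sketch follows, with the table name taken from the log and the rest assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ExplicitFlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; behind the scenes this runs
      // a flush procedure like pid=157/158 above, which is retried while a flush
      // is already in progress on the region.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}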
2024-11-20T22:25:52,103 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:52,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:52,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:52,104 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/b61e7aa507e143e29e2781524b86c106, store: [table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:25:52,105 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/b61e7aa507e143e29e2781524b86c106 is 175, key is test_row_0/A:col10/1732141552036/Put/seqid=0 2024-11-20T22:25:52,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742402_1578 (size=30955) 2024-11-20T22:25:52,126 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=44, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/b61e7aa507e143e29e2781524b86c106 2024-11-20T22:25:52,139 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/b69be3a705a6484282e8246ea4a2a2db is 50, key is test_row_0/B:col10/1732141552036/Put/seqid=0 2024-11-20T22:25:52,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52094 deadline: 1732141612144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:52,146 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141612144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:52,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141612144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:52,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:25:52,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52068 deadline: 1732141612145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:25:52,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742403_1579 (size=12001)
2024-11-20T22:25:52,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157
2024-11-20T22:25:52,259 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950
2024-11-20T22:25:52,260 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158
2024-11-20T22:25:52,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.
2024-11-20T22:25:52,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing
2024-11-20T22:25:52,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.
2024-11-20T22:25:52,260 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158
java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T22:25:52,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158
java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T22:25:52,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=158
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T22:25:52,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:25:52,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141612343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:25:52,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:25:52,348 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:25:52,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141612347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:25:52,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141612347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:25:52,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:25:52,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52094 deadline: 1732141612347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:25:52,351 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:25:52,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52068 deadline: 1732141612349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950
2024-11-20T22:25:52,413 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950
2024-11-20T22:25:52,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158
2024-11-20T22:25:52,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.
2024-11-20T22:25:52,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing
2024-11-20T22:25:52,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.
2024-11-20T22:25:52,413 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158
java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T22:25:52,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158
java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T22:25:52,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=158
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T22:25:52,557 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=44 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/b69be3a705a6484282e8246ea4a2a2db
2024-11-20T22:25:52,570 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/4294785826d847dcb0d181f442a11fd7 is 50, key is test_row_0/C:col10/1732141552036/Put/seqid=0
2024-11-20T22:25:52,570 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950
2024-11-20T22:25:52,571 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158
2024-11-20T22:25:52,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.
2024-11-20T22:25:52,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing
2024-11-20T22:25:52,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.
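The repeated RegionTooBusyException entries above are the region's memstore back-pressure: once unflushed data passes the blocking limit (512.0 K here, i.e. the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier), HRegion.checkResources rejects further mutations until the in-flight flush finishes, and the re-dispatched FlushRegionCallable for pid=158 keeps failing with "Unable to complete flush" because the region reports it is already flushing. The stock HBase client already treats this as a retriable error; the sketch below is only a minimal illustration of an application-level backoff around Table.put, and the table, row, column family, value and retry parameters are assumed stand-ins echoing the TestAcidGuarantees entries in this log, not code taken from the test itself.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Hypothetical write mirroring the mutations rejected in the log above.
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

          long backoffMs = 100; // initial pause; doubled after every rejection
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put); // the server may answer with RegionTooBusyException while flushing
              break;          // write accepted
            } catch (RegionTooBusyException e) {
              // Region is over its memstore blocking limit; give the flush time to finish.
              // (Depending on client version the exception can also surface wrapped in a
              // RetriesExhaustedException rather than being thrown directly.)
              if (attempt == 5) {
                throw e;      // give up after a few attempts
              }
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }

The 512.0 K threshold only appears because the test runs with a deliberately tiny flush size; with the defaults (128 MB flush size, block multiplier 4) the same back-pressure would not kick in until a region held roughly 512 MB of unflushed data.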
2024-11-20T22:25:52,571 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158
java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T22:25:52,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158
java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T22:25:52,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=158
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T22:25:52,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742404_1580 (size=12001)
2024-11-20T22:25:52,586 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=44 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/4294785826d847dcb0d181f442a11fd7
2024-11-20T22:25:52,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/b61e7aa507e143e29e2781524b86c106 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/b61e7aa507e143e29e2781524b86c106
2024-11-20T22:25:52,602 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/b61e7aa507e143e29e2781524b86c106, entries=150, sequenceid=44, filesize=30.2 K
2024-11-20T22:25:52,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/b69be3a705a6484282e8246ea4a2a2db as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/b69be3a705a6484282e8246ea4a2a2db
2024-11-20T22:25:52,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,610 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/b69be3a705a6484282e8246ea4a2a2db, entries=150, sequenceid=44, filesize=11.7 K 2024-11-20T22:25:52,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,611 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/4294785826d847dcb0d181f442a11fd7 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/4294785826d847dcb0d181f442a11fd7 2024-11-20T22:25:52,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,613 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,614 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/4294785826d847dcb0d181f442a11fd7, entries=150, sequenceid=44, filesize=11.7 K 2024-11-20T22:25:52,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,615 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for bacf880704606b04149d74a04b9ceb0e in 579ms, sequenceid=44, compaction requested=false 2024-11-20T22:25:52,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:25:52,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,616 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,620 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,624 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,628 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,632 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,636 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,643 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,654 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,660 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,665 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bacf880704606b04149d74a04b9ceb0e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:25:52,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=A 2024-11-20T22:25:52,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:52,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=B 2024-11-20T22:25:52,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:52,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=C 2024-11-20T22:25:52,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:52,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:52,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,685 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204733b7fa31f44f6daaf847d3e0daeac0_bacf880704606b04149d74a04b9ceb0e is 50, key is test_row_0/A:col10/1732141552040/Put/seqid=0 2024-11-20T22:25:52,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,725 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:52,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,725 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T22:25:52,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:52,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:25:52,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
2024-11-20T22:25:52,726 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:52,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:52,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:52,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742405_1581 (size=19474) 2024-11-20T22:25:52,735 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T22:25:52,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,744 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204733b7fa31f44f6daaf847d3e0daeac0_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204733b7fa31f44f6daaf847d3e0daeac0_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:52,745 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/8705f7cbf1f84b4dae5dd4e6b9671e48, store: [table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:25:52,746 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/8705f7cbf1f84b4dae5dd4e6b9671e48 is 175, key is test_row_0/A:col10/1732141552040/Put/seqid=0 2024-11-20T22:25:52,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:52,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52094 deadline: 1732141612755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:52,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52068 deadline: 1732141612758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:52,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141612759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:52,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141612761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:52,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742406_1582 (size=56733) 2024-11-20T22:25:52,855 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141612850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:52,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52094 deadline: 1732141612862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:52,872 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52068 deadline: 1732141612868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:52,876 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141612871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:52,877 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141612875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:52,887 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:52,887 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T22:25:52,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:52,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:25:52,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:52,888 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:52,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:52,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:53,040 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:53,042 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T22:25:53,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:53,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:25:53,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:53,043 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:53,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:53,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:53,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52094 deadline: 1732141613069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:53,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52068 deadline: 1732141613073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:53,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141613078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:53,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141613079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:53,194 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/8705f7cbf1f84b4dae5dd4e6b9671e48 2024-11-20T22:25:53,196 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:53,196 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T22:25:53,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:53,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:25:53,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:53,197 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:53,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:53,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:53,211 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/66a38c00149a4403b1c60f424b85d4ce is 50, key is test_row_0/B:col10/1732141552040/Put/seqid=0 2024-11-20T22:25:53,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742407_1583 (size=12001) 2024-11-20T22:25:53,350 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:53,351 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T22:25:53,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:53,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:25:53,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:53,352 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
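The repeated RegionTooBusyException entries above show client Mutate calls being rejected while region bacf880704606b04149d74a04b9ceb0e is over its 512.0 K blocking memstore limit, and the master keeps re-dispatching the pid=158 flush because the region reports it is already flushing. The sketch below is a minimal, illustrative client-side backoff loop for that situation. It uses the standard HBase 2.x client API, but the table name, column, backoff values and attempt count are assumptions, and depending on client retry settings the exception may surface wrapped in a retries-exhausted exception rather than directly as shown.

```java
// Illustrative only: back off and retry a put that is rejected with
// RegionTooBusyException, as seen in the log above. Table/column names and
// backoff values are assumptions, not taken from this test run.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;          // assumed starting backoff
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);            // rejected while the memstore is over its blocking limit
          break;                     // success: stop retrying
        } catch (RegionTooBusyException busy) {
          Thread.sleep(backoffMs);   // give the in-progress flush time to drain the memstore
          backoffMs *= 2;            // exponential backoff before the next attempt
        }
      }
    }
  }
}
```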
2024-11-20T22:25:53,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:53,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:53,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52094 deadline: 1732141613374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:53,377 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52068 deadline: 1732141613377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:53,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141613383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:53,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141613385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:53,507 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:53,509 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T22:25:53,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:53,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:25:53,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:53,510 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:53,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:53,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:53,641 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/66a38c00149a4403b1c60f424b85d4ce 2024-11-20T22:25:53,656 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/ef46413e76004a8bb9d6fdb383b366e6 is 50, key is test_row_0/C:col10/1732141552040/Put/seqid=0 2024-11-20T22:25:53,663 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:53,664 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T22:25:53,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:53,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:25:53,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:53,665 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:53,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:53,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:53,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742408_1584 (size=12001) 2024-11-20T22:25:53,684 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/ef46413e76004a8bb9d6fdb383b366e6 2024-11-20T22:25:53,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/8705f7cbf1f84b4dae5dd4e6b9671e48 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/8705f7cbf1f84b4dae5dd4e6b9671e48 2024-11-20T22:25:53,696 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/8705f7cbf1f84b4dae5dd4e6b9671e48, entries=300, sequenceid=55, filesize=55.4 K 2024-11-20T22:25:53,697 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/66a38c00149a4403b1c60f424b85d4ce as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/66a38c00149a4403b1c60f424b85d4ce 2024-11-20T22:25:53,701 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/66a38c00149a4403b1c60f424b85d4ce, entries=150, sequenceid=55, filesize=11.7 K 2024-11-20T22:25:53,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/ef46413e76004a8bb9d6fdb383b366e6 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/ef46413e76004a8bb9d6fdb383b366e6 2024-11-20T22:25:53,706 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/ef46413e76004a8bb9d6fdb383b366e6, entries=150, sequenceid=55, filesize=11.7 K 2024-11-20T22:25:53,709 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for bacf880704606b04149d74a04b9ceb0e in 1044ms, sequenceid=55, compaction requested=true 2024-11-20T22:25:53,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:25:53,709 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:53,710 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 127237 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:53,710 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/A is initiating minor compaction (all files) 2024-11-20T22:25:53,710 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/A in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:53,710 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/ad81fb331931472b85218348bc432264, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/b61e7aa507e143e29e2781524b86c106, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/8705f7cbf1f84b4dae5dd4e6b9671e48] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=124.3 K 2024-11-20T22:25:53,710 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:53,710 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/ad81fb331931472b85218348bc432264, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/b61e7aa507e143e29e2781524b86c106, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/8705f7cbf1f84b4dae5dd4e6b9671e48] 2024-11-20T22:25:53,711 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting ad81fb331931472b85218348bc432264, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732141551629 2024-11-20T22:25:53,711 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting b61e7aa507e143e29e2781524b86c106, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1732141551704 2024-11-20T22:25:53,711 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 8705f7cbf1f84b4dae5dd4e6b9671e48, keycount=300, bloomtype=ROW, size=55.4 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732141552040 2024-11-20T22:25:53,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:53,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:53,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:53,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:53,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:53,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:25:53,716 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:53,717 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:53,717 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/B is initiating minor compaction (all files) 2024-11-20T22:25:53,717 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/B in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
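The ExploringCompactionPolicy lines above report that all three A-family files (38.6 K, 30.2 K and 55.4 K) were selected after passing the size-ratio check. The snippet below is a simplified illustration of that ratio test, not the actual HBase policy code: a candidate set qualifies when no single file is larger than the combined size of the remaining files times the compaction ratio (1.2 is the default hbase.hstore.compaction.ratio; the byte sizes used here are rounded stand-ins for the files in the log).

```java
// Simplified sketch of the "in ratio" test behind the
// "Exploring compaction algorithm has selected 3 files" lines above.
import java.util.List;

public class RatioCheckSketch {
  /** A candidate set is "in ratio" if no file dwarfs the sum of the others. */
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      // each file must be no larger than (sum of the other files) * ratio
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // rounded stand-ins for the 38.6 K, 30.2 K and 55.4 K A-family files
    List<Long> sizes = List.of(39_000L, 31_000L, 56_000L);
    System.out.println(filesInRatio(sizes, 1.2)); // prints true: all three qualify
  }
}
```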
2024-11-20T22:25:53,717 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/e415b976194f44d4a4530135aad71ea5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/b69be3a705a6484282e8246ea4a2a2db, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/66a38c00149a4403b1c60f424b85d4ce] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=35.2 K 2024-11-20T22:25:53,717 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting e415b976194f44d4a4530135aad71ea5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732141551632 2024-11-20T22:25:53,718 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting b69be3a705a6484282e8246ea4a2a2db, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1732141551704 2024-11-20T22:25:53,719 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66a38c00149a4403b1c60f424b85d4ce, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732141552040 2024-11-20T22:25:53,730 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:25:53,737 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#B#compaction#495 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:53,737 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/073c661fff114780abbe32bcac8ba704 is 50, key is test_row_0/B:col10/1732141552040/Put/seqid=0 2024-11-20T22:25:53,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T22:25:53,752 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112014715e9f67d344869b0dd2db812ccd7f_bacf880704606b04149d74a04b9ceb0e store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:25:53,755 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112014715e9f67d344869b0dd2db812ccd7f_bacf880704606b04149d74a04b9ceb0e, store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:25:53,755 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112014715e9f67d344869b0dd2db812ccd7f_bacf880704606b04149d74a04b9ceb0e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:25:53,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742409_1585 (size=12104) 2024-11-20T22:25:53,818 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:53,818 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T22:25:53,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
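The DefaultMobStoreFlusher and DefaultMobStoreCompactor entries above come from the A family being MOB-enabled, which is why the compactor creates and then aborts a MOB writer when no MOB cells are present. For context, a MOB-enabled family is declared roughly as in the hedged sketch below; the threshold value and the single-family table are assumptions for illustration, not the actual TestAcidGuarantees setup.

```java
// Minimal sketch of creating a table with a MOB-enabled column family.
// Threshold and family layout are assumptions, not this test's configuration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.createTable(
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              .setColumnFamily(
                  ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                      .setMobEnabled(true)    // large values for this family go to MOB files
                      .setMobThreshold(100L)  // assumed threshold in bytes
                      .build())
              .build());
    }
  }
}
```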
2024-11-20T22:25:53,820 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2837): Flushing bacf880704606b04149d74a04b9ceb0e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:25:53,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=A 2024-11-20T22:25:53,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:53,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=B 2024-11-20T22:25:53,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:53,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=C 2024-11-20T22:25:53,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:53,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742410_1586 (size=4469) 2024-11-20T22:25:53,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201a39e4380bd849ac9625a8e1a21b64a8_bacf880704606b04149d74a04b9ceb0e is 50, key is test_row_0/A:col10/1732141552751/Put/seqid=0 2024-11-20T22:25:53,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
as already flushing 2024-11-20T22:25:53,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:53,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742411_1587 (size=12154) 2024-11-20T22:25:53,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:53,888 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201a39e4380bd849ac9625a8e1a21b64a8_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201a39e4380bd849ac9625a8e1a21b64a8_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:53,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/87c476c7f3e14b3f907958a8d4eb3fd3, store: [table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:25:53,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/87c476c7f3e14b3f907958a8d4eb3fd3 is 175, key is test_row_0/A:col10/1732141552751/Put/seqid=0 2024-11-20T22:25:53,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141613890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:53,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141613895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:53,902 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141613895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:53,902 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52068 deadline: 1732141613896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:53,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742412_1588 (size=30955) 2024-11-20T22:25:53,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52094 deadline: 1732141613905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:54,006 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52068 deadline: 1732141614003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:54,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141614007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:54,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52094 deadline: 1732141614013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:54,210 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/073c661fff114780abbe32bcac8ba704 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/073c661fff114780abbe32bcac8ba704 2024-11-20T22:25:54,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52068 deadline: 1732141614208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:54,217 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141614213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:54,222 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/B of bacf880704606b04149d74a04b9ceb0e into 073c661fff114780abbe32bcac8ba704(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:54,222 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:25:54,222 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/B, priority=13, startTime=1732141553716; duration=0sec 2024-11-20T22:25:54,222 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:54,222 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:B 2024-11-20T22:25:54,222 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:54,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52094 deadline: 1732141614222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:54,227 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:54,227 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/C is initiating minor compaction (all files) 2024-11-20T22:25:54,227 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/C in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:54,227 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/f5683d5f9f8a4378997b29ea792f1e0f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/4294785826d847dcb0d181f442a11fd7, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/ef46413e76004a8bb9d6fdb383b366e6] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=35.2 K 2024-11-20T22:25:54,228 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5683d5f9f8a4378997b29ea792f1e0f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732141551632 2024-11-20T22:25:54,228 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4294785826d847dcb0d181f442a11fd7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1732141551704 2024-11-20T22:25:54,228 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef46413e76004a8bb9d6fdb383b366e6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732141552040 2024-11-20T22:25:54,233 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#A#compaction#494 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 
ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:54,234 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/2ed5f848b91e48b1a13f4ba7594f7b4d is 175, key is test_row_0/A:col10/1732141552040/Put/seqid=0 2024-11-20T22:25:54,268 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#C#compaction#497 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:54,269 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/bed795af1d874d728594d40154e0ed79 is 50, key is test_row_0/C:col10/1732141552040/Put/seqid=0 2024-11-20T22:25:54,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742413_1589 (size=31058) 2024-11-20T22:25:54,310 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=80, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/87c476c7f3e14b3f907958a8d4eb3fd3 2024-11-20T22:25:54,317 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/2ed5f848b91e48b1a13f4ba7594f7b4d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/2ed5f848b91e48b1a13f4ba7594f7b4d 2024-11-20T22:25:54,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742414_1590 (size=12104) 2024-11-20T22:25:54,327 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/A of bacf880704606b04149d74a04b9ceb0e into 2ed5f848b91e48b1a13f4ba7594f7b4d(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:54,327 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:25:54,327 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/A, priority=13, startTime=1732141553709; duration=0sec 2024-11-20T22:25:54,328 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:54,328 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:A 2024-11-20T22:25:54,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/61ffae69605040f5af0b3c671efc0a01 is 50, key is test_row_0/B:col10/1732141552751/Put/seqid=0 2024-11-20T22:25:54,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742415_1591 (size=12001) 2024-11-20T22:25:54,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52068 deadline: 1732141614515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:54,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141614518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:54,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52094 deadline: 1732141614528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:54,743 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/bed795af1d874d728594d40154e0ed79 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/bed795af1d874d728594d40154e0ed79 2024-11-20T22:25:54,751 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/C of bacf880704606b04149d74a04b9ceb0e into bed795af1d874d728594d40154e0ed79(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:54,751 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:25:54,751 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/C, priority=13, startTime=1732141553716; duration=0sec 2024-11-20T22:25:54,751 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:54,751 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:C 2024-11-20T22:25:54,796 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T22:25:54,810 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/61ffae69605040f5af0b3c671efc0a01 2024-11-20T22:25:54,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/8cb205eb99b74406a9c60b9349f251b4 is 50, key is test_row_0/C:col10/1732141552751/Put/seqid=0 2024-11-20T22:25:54,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742416_1592 (size=12001) 2024-11-20T22:25:54,866 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/8cb205eb99b74406a9c60b9349f251b4 2024-11-20T22:25:54,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/87c476c7f3e14b3f907958a8d4eb3fd3 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/87c476c7f3e14b3f907958a8d4eb3fd3 2024-11-20T22:25:54,883 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/87c476c7f3e14b3f907958a8d4eb3fd3, entries=150, sequenceid=80, filesize=30.2 K 2024-11-20T22:25:54,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 
{event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/61ffae69605040f5af0b3c671efc0a01 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/61ffae69605040f5af0b3c671efc0a01 2024-11-20T22:25:54,889 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/61ffae69605040f5af0b3c671efc0a01, entries=150, sequenceid=80, filesize=11.7 K 2024-11-20T22:25:54,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/8cb205eb99b74406a9c60b9349f251b4 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/8cb205eb99b74406a9c60b9349f251b4 2024-11-20T22:25:54,894 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/8cb205eb99b74406a9c60b9349f251b4, entries=150, sequenceid=80, filesize=11.7 K 2024-11-20T22:25:54,895 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for bacf880704606b04149d74a04b9ceb0e in 1075ms, sequenceid=80, compaction requested=false 2024-11-20T22:25:54,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2538): Flush status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:25:54,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
2024-11-20T22:25:54,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=158 2024-11-20T22:25:54,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=158 2024-11-20T22:25:54,901 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-11-20T22:25:54,901 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.2650 sec 2024-11-20T22:25:54,902 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees in 3.2680 sec 2024-11-20T22:25:54,908 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bacf880704606b04149d74a04b9ceb0e 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T22:25:54,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=A 2024-11-20T22:25:54,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:54,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=B 2024-11-20T22:25:54,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:54,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=C 2024-11-20T22:25:54,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:54,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:54,937 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112020e47ffbb2d143a9931dfb168adda517_bacf880704606b04149d74a04b9ceb0e is 50, key is test_row_0/A:col10/1732141553879/Put/seqid=0 2024-11-20T22:25:54,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742417_1593 (size=12154) 2024-11-20T22:25:55,052 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141615040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:55,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141615040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:55,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141615043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:55,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52094 deadline: 1732141615055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:55,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52068 deadline: 1732141615055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:55,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141615154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:55,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141615154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:55,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141615155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:55,169 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52068 deadline: 1732141615166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:55,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52094 deadline: 1732141615167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:55,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141615361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:55,363 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141615361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:55,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141615365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:55,372 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:55,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52068 deadline: 1732141615371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:55,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52094 deadline: 1732141615373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:55,378 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112020e47ffbb2d143a9931dfb168adda517_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112020e47ffbb2d143a9931dfb168adda517_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:55,379 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/5e2059836b8349a9a56a7230dd3782e5, store: [table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:25:55,379 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/5e2059836b8349a9a56a7230dd3782e5 is 175, key is test_row_0/A:col10/1732141553879/Put/seqid=0 2024-11-20T22:25:55,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742418_1594 (size=30955) 2024-11-20T22:25:55,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141615666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:55,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141615666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:55,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141615669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:55,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52068 deadline: 1732141615678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:55,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52094 deadline: 1732141615677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:55,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T22:25:55,740 INFO [Thread-2535 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-11-20T22:25:55,742 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:55,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-11-20T22:25:55,744 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:55,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T22:25:55,745 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:55,745 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:55,817 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=96, memsize=22.4 K, 
hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/5e2059836b8349a9a56a7230dd3782e5 2024-11-20T22:25:55,836 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/ab09beb5378149ad85498c5f3e1bb601 is 50, key is test_row_0/B:col10/1732141553879/Put/seqid=0 2024-11-20T22:25:55,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T22:25:55,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742419_1595 (size=12001) 2024-11-20T22:25:55,874 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/ab09beb5378149ad85498c5f3e1bb601 2024-11-20T22:25:55,888 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/e2dc43ee86d4475f8ef2f5466484cf73 is 50, key is test_row_0/C:col10/1732141553879/Put/seqid=0 2024-11-20T22:25:55,896 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:55,898 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T22:25:55,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:55,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:25:55,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:55,898 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:55,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:55,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:55,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742420_1596 (size=12001) 2024-11-20T22:25:55,930 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/e2dc43ee86d4475f8ef2f5466484cf73 2024-11-20T22:25:55,948 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/5e2059836b8349a9a56a7230dd3782e5 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/5e2059836b8349a9a56a7230dd3782e5 2024-11-20T22:25:55,954 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/5e2059836b8349a9a56a7230dd3782e5, entries=150, sequenceid=96, filesize=30.2 K 2024-11-20T22:25:55,965 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/ab09beb5378149ad85498c5f3e1bb601 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/ab09beb5378149ad85498c5f3e1bb601 2024-11-20T22:25:55,973 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/ab09beb5378149ad85498c5f3e1bb601, entries=150, sequenceid=96, filesize=11.7 K 2024-11-20T22:25:55,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/e2dc43ee86d4475f8ef2f5466484cf73 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/e2dc43ee86d4475f8ef2f5466484cf73 2024-11-20T22:25:55,994 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/e2dc43ee86d4475f8ef2f5466484cf73, entries=150, sequenceid=96, filesize=11.7 K 2024-11-20T22:25:55,996 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for bacf880704606b04149d74a04b9ceb0e in 1088ms, sequenceid=96, compaction requested=true 2024-11-20T22:25:55,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:25:55,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:55,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:55,996 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:55,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:55,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:55,996 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:55,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:55,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:55,997 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:55,997 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/A is initiating minor 
compaction (all files) 2024-11-20T22:25:55,997 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/A in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:55,997 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/2ed5f848b91e48b1a13f4ba7594f7b4d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/87c476c7f3e14b3f907958a8d4eb3fd3, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/5e2059836b8349a9a56a7230dd3782e5] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=90.8 K 2024-11-20T22:25:55,997 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:55,997 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/2ed5f848b91e48b1a13f4ba7594f7b4d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/87c476c7f3e14b3f907958a8d4eb3fd3, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/5e2059836b8349a9a56a7230dd3782e5] 2024-11-20T22:25:55,998 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:55,998 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/B is initiating minor compaction (all files) 2024-11-20T22:25:55,998 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/B in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
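The compaction entries around this point show ExploringCompactionPolicy selecting all three flushed store files of a family ("3 eligible, 16 blocking") for a minor compaction right after the flush lands. That selection is governed by a handful of ordinary hbase-site.xml settings; the sketch below sets them programmatically purely for illustration, using what I believe are the stock default values rather than whatever this test actually configures.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum/maximum number of eligible files before a minor compaction is selected.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Ratio used by ExploringCompactionPolicy when weighing candidate file sets.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    // Writes to a store are blocked once it accumulates this many files
    // (the "16 blocking" figure in the log).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}
```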
2024-11-20T22:25:55,998 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/073c661fff114780abbe32bcac8ba704, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/61ffae69605040f5af0b3c671efc0a01, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/ab09beb5378149ad85498c5f3e1bb601] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=35.3 K 2024-11-20T22:25:55,998 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ed5f848b91e48b1a13f4ba7594f7b4d, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732141552040 2024-11-20T22:25:55,998 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 073c661fff114780abbe32bcac8ba704, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732141552040 2024-11-20T22:25:55,999 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 61ffae69605040f5af0b3c671efc0a01, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732141552745 2024-11-20T22:25:55,999 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87c476c7f3e14b3f907958a8d4eb3fd3, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732141552745 2024-11-20T22:25:55,999 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting ab09beb5378149ad85498c5f3e1bb601, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732141553879 2024-11-20T22:25:56,000 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e2059836b8349a9a56a7230dd3782e5, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732141553879 2024-11-20T22:25:56,013 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:25:56,024 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#B#compaction#504 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:56,025 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/2efedca84ead4590b196b328e48a9514 is 50, key is test_row_0/B:col10/1732141553879/Put/seqid=0 2024-11-20T22:25:56,027 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411208a904613383d41849d711b30e16b47a4_bacf880704606b04149d74a04b9ceb0e store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:25:56,029 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411208a904613383d41849d711b30e16b47a4_bacf880704606b04149d74a04b9ceb0e, store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:25:56,029 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208a904613383d41849d711b30e16b47a4_bacf880704606b04149d74a04b9ceb0e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:25:56,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T22:25:56,051 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:56,051 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T22:25:56,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
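Just above, the master has stored FlushTableProcedure pid=159 for TestAcidGuarantees and dispatched FlushRegionCallable pid=160 to the region server, while the caller keeps polling "Checking to see if procedure is done". From the client's point of view that whole exchange is a single Admin call; a minimal sketch assuming the standard HBase Java client, with only the table name taken from the log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; in this build the log shows it
      // running as a FlushTableProcedure, with the admin-side TableFuture reporting
      // "Operation: FLUSH ... completed" once the procedure finishes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```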
2024-11-20T22:25:56,052 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing bacf880704606b04149d74a04b9ceb0e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T22:25:56,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=A 2024-11-20T22:25:56,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:56,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=B 2024-11-20T22:25:56,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:56,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=C 2024-11-20T22:25:56,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:56,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742421_1597 (size=12207) 2024-11-20T22:25:56,073 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/2efedca84ead4590b196b328e48a9514 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/2efedca84ead4590b196b328e48a9514 2024-11-20T22:25:56,093 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/B of bacf880704606b04149d74a04b9ceb0e into 2efedca84ead4590b196b328e48a9514(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
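Most of this excerpt, before and after the flush, is the region server rejecting writes from HRegion.checkResources with RegionTooBusyException because the region's memstore is over its 512.0 K blocking limit (normally hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, so the small value suggests the test shrinks the flush size deliberately). A minimal client-side sketch of how a writer might back off until the flush drains the memstore; the row, family and qualifier are taken from the log, while the retry budget and backoff values are purely illustrative:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));

      long backoffMs = 100;                              // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {   // illustrative retry budget
        try {
          table.put(put);   // may be rejected while the memstore is over its blocking limit
          break;            // write accepted
        } catch (IOException busy) {
          // By the time the RegionTooBusyException seen in the log surfaces here, the best
          // an application can do is wait for the flush to drain the memstore and try again.
          if (attempt == 5) throw busy;
          Thread.sleep(backoffMs);
          backoffMs *= 2;   // simple exponential backoff
        }
      }
    }
  }
}
```

In practice the stock client generally retries this kind of push-back internally (governed by hbase.client.retries.number and the client pause settings), so an explicit loop like this only matters once those built-in retries are exhausted.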
2024-11-20T22:25:56,093 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:25:56,093 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/B, priority=13, startTime=1732141555996; duration=0sec 2024-11-20T22:25:56,093 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:56,093 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:B 2024-11-20T22:25:56,093 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:56,096 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:56,096 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/C is initiating minor compaction (all files) 2024-11-20T22:25:56,096 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/C in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:56,096 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/bed795af1d874d728594d40154e0ed79, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/8cb205eb99b74406a9c60b9349f251b4, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/e2dc43ee86d4475f8ef2f5466484cf73] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=35.3 K 2024-11-20T22:25:56,096 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting bed795af1d874d728594d40154e0ed79, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732141552040 2024-11-20T22:25:56,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742422_1598 (size=4469) 2024-11-20T22:25:56,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120840228c8fcd04a4ab77644b621491ad5_bacf880704606b04149d74a04b9ceb0e is 50, key is test_row_0/A:col10/1732141555050/Put/seqid=0 2024-11-20T22:25:56,099 DEBUG 
[RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 8cb205eb99b74406a9c60b9349f251b4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732141552745 2024-11-20T22:25:56,099 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting e2dc43ee86d4475f8ef2f5466484cf73, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732141553879 2024-11-20T22:25:56,118 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#C#compaction#506 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:56,118 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/f1364c7f7a0f4d618eac1aebc83700d8 is 50, key is test_row_0/C:col10/1732141553879/Put/seqid=0 2024-11-20T22:25:56,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742423_1599 (size=12154) 2024-11-20T22:25:56,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742424_1600 (size=12207) 2024-11-20T22:25:56,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:25:56,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:56,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52068 deadline: 1732141616190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:56,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141616191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:56,202 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141616196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:56,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52094 deadline: 1732141616197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:56,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141616198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:56,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141616303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:56,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141616304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:56,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141616304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:56,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T22:25:56,494 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#A#compaction#503 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:56,495 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/ae48ef50907540999add71f47a936db5 is 175, key is test_row_0/A:col10/1732141553879/Put/seqid=0 2024-11-20T22:25:56,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742425_1601 (size=31161) 2024-11-20T22:25:56,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141616507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:56,511 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/ae48ef50907540999add71f47a936db5 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/ae48ef50907540999add71f47a936db5 2024-11-20T22:25:56,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141616509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:56,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141616511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:56,516 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/A of bacf880704606b04149d74a04b9ceb0e into ae48ef50907540999add71f47a936db5(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
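The repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting mutations while the region's memstore sits above its blocking limit (reported here as 512.0 K). That limit is the per-region memstore flush size multiplied by the block multiplier. The sketch below shows one way such a small limit could be configured; the concrete values are hypothetical, chosen only so the product matches the 512 K figure in this log, and are not taken from the test's actual settings.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Minimal sketch, assuming the standard memstore settings:
    // the blocking limit reported as "Over memstore limit=512.0 K" is
    // hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier.
    public class MemstoreLimitConfig {
        public static Configuration smallMemstoreConf() {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush the region memstore at 128 K (hypothetical)
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block new writes at 4 x 128 K = 512 K
            return conf;
        }
    }

Once the flushes and compactions recorded in the surrounding entries drain the memstore back below that threshold, the handlers stop throwing and the blocked writers proceed.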
2024-11-20T22:25:56,516 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:25:56,516 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/A, priority=13, startTime=1732141555996; duration=0sec 2024-11-20T22:25:56,516 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:56,517 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:A 2024-11-20T22:25:56,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:56,549 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120840228c8fcd04a4ab77644b621491ad5_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120840228c8fcd04a4ab77644b621491ad5_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:56,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/a38d15dc680141c3bff9bfcb22f83184, store: [table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:25:56,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/a38d15dc680141c3bff9bfcb22f83184 is 175, key is test_row_0/A:col10/1732141555050/Put/seqid=0 2024-11-20T22:25:56,565 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/f1364c7f7a0f4d618eac1aebc83700d8 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/f1364c7f7a0f4d618eac1aebc83700d8 2024-11-20T22:25:56,572 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/C of bacf880704606b04149d74a04b9ceb0e into f1364c7f7a0f4d618eac1aebc83700d8(size=11.9 K), total size for store is 11.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:56,572 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:25:56,573 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/C, priority=13, startTime=1732141555996; duration=0sec 2024-11-20T22:25:56,573 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:56,573 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:C 2024-11-20T22:25:56,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742426_1602 (size=30955) 2024-11-20T22:25:56,585 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=120, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/a38d15dc680141c3bff9bfcb22f83184 2024-11-20T22:25:56,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/4b51c9ad66974c858cf016bc9ba05725 is 50, key is test_row_0/B:col10/1732141555050/Put/seqid=0 2024-11-20T22:25:56,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742427_1603 (size=12001) 2024-11-20T22:25:56,811 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141616809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:56,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141616817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:56,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141616822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:56,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T22:25:57,055 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/4b51c9ad66974c858cf016bc9ba05725 2024-11-20T22:25:57,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/f2676ea35aff4045b26fde3cbb73128e is 50, key is test_row_0/C:col10/1732141555050/Put/seqid=0 2024-11-20T22:25:57,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742428_1604 (size=12001) 2024-11-20T22:25:57,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:57,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52068 deadline: 1732141617201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:57,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:57,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52094 deadline: 1732141617211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:57,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:57,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141617314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:57,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:57,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141617322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:57,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:57,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141617330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:57,521 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/f2676ea35aff4045b26fde3cbb73128e 2024-11-20T22:25:57,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/a38d15dc680141c3bff9bfcb22f83184 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/a38d15dc680141c3bff9bfcb22f83184 2024-11-20T22:25:57,527 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/a38d15dc680141c3bff9bfcb22f83184, entries=150, sequenceid=120, filesize=30.2 K 2024-11-20T22:25:57,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/4b51c9ad66974c858cf016bc9ba05725 as 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/4b51c9ad66974c858cf016bc9ba05725 2024-11-20T22:25:57,530 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/4b51c9ad66974c858cf016bc9ba05725, entries=150, sequenceid=120, filesize=11.7 K 2024-11-20T22:25:57,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/f2676ea35aff4045b26fde3cbb73128e as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/f2676ea35aff4045b26fde3cbb73128e 2024-11-20T22:25:57,537 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/f2676ea35aff4045b26fde3cbb73128e, entries=150, sequenceid=120, filesize=11.7 K 2024-11-20T22:25:57,538 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for bacf880704606b04149d74a04b9ceb0e in 1486ms, sequenceid=120, compaction requested=false 2024-11-20T22:25:57,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:25:57,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
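The rejected Mutate calls logged above are not lost: RegionTooBusyException is a retriable IOException, so the HBase client normally backs off and retries until the flush that finishes here frees up memstore space. Below is a minimal sketch of what an explicit, application-level retry would look like, assuming the table and column layout visible in this log (table TestAcidGuarantees, family A, qualifier col10); the value, attempt count, and backoff are hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);               // may surface RegionTooBusyException under memstore pressure
                        break;
                    } catch (RegionTooBusyException e) {
                        if (attempt >= 5) throw e;    // give up after a few attempts (hypothetical policy)
                        Thread.sleep(100L * attempt); // back off while the region flushes
                    }
                }
            }
        }
    }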
2024-11-20T22:25:57,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-11-20T22:25:57,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-11-20T22:25:57,540 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-11-20T22:25:57,541 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7940 sec 2024-11-20T22:25:57,541 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 1.7990 sec 2024-11-20T22:25:57,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T22:25:57,848 INFO [Thread-2535 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-11-20T22:25:57,849 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:57,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-11-20T22:25:57,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T22:25:57,850 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:57,850 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:57,850 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:57,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T22:25:58,001 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:58,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T22:25:58,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
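Procedure 159 above (and 161 right after it) is a table-level flush requested by the test client through the admin API: the master stores a FlushTableProcedure and fans it out into FlushRegionProcedure subprocedures, as the pid/ppid pairs show. A minimal sketch of the client call that kicks off such a chain, assuming a connection to the same cluster:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Waits for the master-side flush procedure, matching the
                // "Operation: FLUSH ... procId: 159 completed" entry above.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }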
2024-11-20T22:25:58,002 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing bacf880704606b04149d74a04b9ceb0e 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T22:25:58,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=A 2024-11-20T22:25:58,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:58,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=B 2024-11-20T22:25:58,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:58,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=C 2024-11-20T22:25:58,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:58,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f2921496ab764c31a9bfe0e2a7b00db1_bacf880704606b04149d74a04b9ceb0e is 50, key is test_row_0/A:col10/1732141556187/Put/seqid=0 2024-11-20T22:25:58,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742429_1605 (size=12304) 2024-11-20T22:25:58,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:58,015 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f2921496ab764c31a9bfe0e2a7b00db1_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f2921496ab764c31a9bfe0e2a7b00db1_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:58,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/50a69b4bad894a4a9fcdda2cada9bad5, store: [table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:25:58,016 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/50a69b4bad894a4a9fcdda2cada9bad5 is 175, key is test_row_0/A:col10/1732141556187/Put/seqid=0 2024-11-20T22:25:58,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742430_1606 (size=31105) 2024-11-20T22:25:58,021 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=136, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/50a69b4bad894a4a9fcdda2cada9bad5 2024-11-20T22:25:58,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/dc7c621b0fe34484b61405bf5fe60dc1 is 50, key is test_row_0/B:col10/1732141556187/Put/seqid=0 2024-11-20T22:25:58,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742431_1607 (size=12151) 2024-11-20T22:25:58,032 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=136 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/dc7c621b0fe34484b61405bf5fe60dc1 2024-11-20T22:25:58,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/9c9aa8ec01c14b979615280e6f5c2d05 is 50, key is test_row_0/C:col10/1732141556187/Put/seqid=0 2024-11-20T22:25:58,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742432_1608 (size=12151) 2024-11-20T22:25:58,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T22:25:58,323 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:25:58,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:58,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:58,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141618355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:58,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:58,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141618358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:58,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:58,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141618358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:58,446 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=136 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/9c9aa8ec01c14b979615280e6f5c2d05 2024-11-20T22:25:58,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/50a69b4bad894a4a9fcdda2cada9bad5 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/50a69b4bad894a4a9fcdda2cada9bad5 2024-11-20T22:25:58,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T22:25:58,452 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/50a69b4bad894a4a9fcdda2cada9bad5, entries=150, sequenceid=136, filesize=30.4 K 2024-11-20T22:25:58,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/dc7c621b0fe34484b61405bf5fe60dc1 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/dc7c621b0fe34484b61405bf5fe60dc1 2024-11-20T22:25:58,455 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/dc7c621b0fe34484b61405bf5fe60dc1, entries=150, sequenceid=136, filesize=11.9 K 2024-11-20T22:25:58,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/9c9aa8ec01c14b979615280e6f5c2d05 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/9c9aa8ec01c14b979615280e6f5c2d05 2024-11-20T22:25:58,458 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/9c9aa8ec01c14b979615280e6f5c2d05, entries=150, sequenceid=136, filesize=11.9 K 2024-11-20T22:25:58,459 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for bacf880704606b04149d74a04b9ceb0e in 457ms, sequenceid=136, compaction requested=true 2024-11-20T22:25:58,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:25:58,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:58,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-11-20T22:25:58,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-11-20T22:25:58,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:58,461 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bacf880704606b04149d74a04b9ceb0e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T22:25:58,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=A 2024-11-20T22:25:58,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:58,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=B 2024-11-20T22:25:58,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:58,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=C 2024-11-20T22:25:58,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:58,465 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-11-20T22:25:58,465 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 
610 msec 2024-11-20T22:25:58,467 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 617 msec 2024-11-20T22:25:58,469 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112074f983ce7dcc4047bcd1cee0c05e3162_bacf880704606b04149d74a04b9ceb0e is 50, key is test_row_0/A:col10/1732141558460/Put/seqid=0 2024-11-20T22:25:58,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742433_1609 (size=14794) 2024-11-20T22:25:58,476 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:58,476 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:58,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141618473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:58,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141618473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:58,476 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:58,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141618473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:58,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:58,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141618578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:58,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:58,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141618578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:58,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:58,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141618578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:58,782 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:58,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141618780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:58,782 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:58,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141618780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:58,782 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:58,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141618781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:58,878 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:58,905 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112074f983ce7dcc4047bcd1cee0c05e3162_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112074f983ce7dcc4047bcd1cee0c05e3162_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:25:58,923 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/4dfd3b756e9e47e8a64decfea5dc9577, store: [table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:25:58,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/4dfd3b756e9e47e8a64decfea5dc9577 is 175, key is test_row_0/A:col10/1732141558460/Put/seqid=0 2024-11-20T22:25:58,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742434_1610 (size=39749) 2024-11-20T22:25:58,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=161 2024-11-20T22:25:58,955 INFO [Thread-2535 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-11-20T22:25:58,963 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:58,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-11-20T22:25:58,964 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:58,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T22:25:58,965 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:58,965 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:59,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T22:25:59,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:59,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:59,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141619083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:59,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141619083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:59,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:59,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141619085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:59,116 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:59,117 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T22:25:59,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:59,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:25:59,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:59,117 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
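
The entries above capture the pattern that dominates this stretch of the run: writer threads issue single-row puts spanning column families A, B and C of TestAcidGuarantees, the region rejects them with RegionTooBusyException once the memstore exceeds its 512.0 K blocking limit, the client's RpcRetryingCallerImpl keeps retrying, and flush procedures (pids 161 through 164) drain the memstore in the background. The sketch below is a minimal, self-contained illustration of that client-side pattern, not code taken from the test; the table, family and qualifier names are copied from the log, while the class name, the payload and the retry settings are illustrative assumptions.

// Hypothetical standalone sketch; not part of the test source.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiFamilyPutAndFlushSketch {

  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Client-side retry knobs; the log shows the caller retrying
    // (tries=6, retries=16) while the region rejects writes with
    // RegionTooBusyException. The values below are illustrative.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100);

    TableName table = TableName.valueOf("TestAcidGuarantees");

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table t = connection.getTable(table);
         Admin admin = connection.getAdmin()) {

      // One row written across the three column families (A, B, C),
      // mirroring the writer threads whose puts appear in the log.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      byte[] value = Bytes.toBytes("dummy-value"); // placeholder payload
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
      put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
      put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);

      // Table.put() retries internally; a RegionTooBusyException from the
      // server (memstore over its blocking limit) is retried until the
      // retry budget or the operation timeout is exhausted.
      t.put(put);

      // Explicit flush request, analogous to the FlushTableProcedure
      // (pid=161/163) triggered by the admin client in the log.
      admin.flush(table);
    }
  }
}

With settings like these, a put that keeps hitting RegionTooBusyException only fails out to the caller after the retry budget or operation timeout is exhausted, which is why the log records repeated "Call exception, tries=6, retries=16" lines from RpcRetryingCallerImpl rather than immediate client errors.
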
2024-11-20T22:25:59,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:59,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:59,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:59,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52068 deadline: 1732141619220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:59,222 DEBUG [Thread-2531 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4167 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., hostname=6365a1e51efd,44631,1732141399950, seqNum=5, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:25:59,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:59,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52094 deadline: 1732141619233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:59,236 DEBUG [Thread-2533 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4181 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., hostname=6365a1e51efd,44631,1732141399950, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:25:59,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T22:25:59,268 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:59,269 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T22:25:59,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:59,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:25:59,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:59,269 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
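The RegionTooBusyException traffic above is server-side back-pressure: HRegion.checkResources rejects each Mutate once the region's memstore passes its blocking limit (512.0 K here), and the client's RpcRetryingCallerImpl keeps retrying the same put (tries=6, retries=16, started=4181 ms ago) while the flush procedure is told "NOT flushing ... as already flushing". A minimal sketch of the client-side write loop that produces this pattern follows; the configuration keys are standard HBase client settings, but the specific values and the class name are illustrative assumptions, not taken from this test.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriter {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative values: how long the caller keeps retrying after a
        // RegionTooBusyException is governed by these client settings
        // (defaults differ between HBase versions).
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.pause", 100);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // Table.put() blocks while RpcRetryingCallerImpl retries; if the region stays
          // over its memstore limit past the retry budget, the call fails with an
          // IOException wrapping RegionTooBusyException, as in the log above.
          table.put(put);
        }
      }
    }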
2024-11-20T22:25:59,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:59,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:59,353 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=160, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/4dfd3b756e9e47e8a64decfea5dc9577 2024-11-20T22:25:59,360 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/38a4955662134fdeb6dead271c5e6718 is 50, key is test_row_0/B:col10/1732141558460/Put/seqid=0 2024-11-20T22:25:59,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742435_1611 (size=12151) 2024-11-20T22:25:59,422 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:59,427 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T22:25:59,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:59,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:25:59,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:59,427 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
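In the pid=164 cycle above, the master dispatches FlushRegionCallable, the region answers "NOT flushing ... as already flushing", RSProcedureHandler raises an IOException, and HMaster records "Remote procedure failed" before re-dispatching; the procedure is simply waiting out the MemStoreFlusher that is already writing the .tmp files. A table flush like this can be requested through the public Admin API; the sketch below assumes that is roughly how the test's flush procedure (pid=163/164) was initiated, which the log itself does not confirm.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlush {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Requests a flush of the table's regions. While a region is still busy with an
          // in-progress flush, the per-region callable fails and is re-dispatched until the
          // region can comply (assumed to match the repeated pid=164 attempts above).
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }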
2024-11-20T22:25:59,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:59,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:59,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T22:25:59,585 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:59,585 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T22:25:59,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:59,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:25:59,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:59,586 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:59,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:59,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:59,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:59,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141619586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:59,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:59,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141619588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:59,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:59,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141619591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:25:59,612 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T22:25:59,612 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-20T22:25:59,737 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:59,739 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T22:25:59,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:59,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:25:59,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:59,739 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:59,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:59,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:59,774 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/38a4955662134fdeb6dead271c5e6718 2024-11-20T22:25:59,780 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/6774f013e21b48d18a8dec9afcb11d91 is 50, key is test_row_0/C:col10/1732141558460/Put/seqid=0 2024-11-20T22:25:59,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742436_1612 (size=12151) 2024-11-20T22:25:59,891 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:25:59,895 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T22:25:59,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:59,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:25:59,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:25:59,895 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:59,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:59,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:00,051 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:00,051 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T22:26:00,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:00,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:26:00,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:00,051 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:00,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:00,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:00,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T22:26:00,201 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/6774f013e21b48d18a8dec9afcb11d91 2024-11-20T22:26:00,204 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:00,204 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T22:26:00,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:00,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:26:00,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
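The "Over memstore limit=512.0 K" threshold that keeps rejecting Mutate calls is the region's blocking size, hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, which this test has evidently lowered well below the defaults. The values in the sketch below are hypothetical, chosen only to reproduce a 512 K limit for illustration; the test's actual settings are not visible in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical values: 128 KB flush size * multiplier 4 = 512 KB blocking limit,
        // matching the "Over memstore limit=512.0 K" figure reported by checkResources.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("Blocking limit (bytes): " + blockingLimit); // 524288 = 512.0 K
      }
    }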
2024-11-20T22:26:00,205 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:00,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:00,206 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/4dfd3b756e9e47e8a64decfea5dc9577 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/4dfd3b756e9e47e8a64decfea5dc9577 2024-11-20T22:26:00,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:26:00,209 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/4dfd3b756e9e47e8a64decfea5dc9577, entries=200, sequenceid=160, filesize=38.8 K 2024-11-20T22:26:00,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/38a4955662134fdeb6dead271c5e6718 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/38a4955662134fdeb6dead271c5e6718 2024-11-20T22:26:00,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,212 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/38a4955662134fdeb6dead271c5e6718, entries=150, sequenceid=160, filesize=11.9 K 2024-11-20T22:26:00,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,213 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/6774f013e21b48d18a8dec9afcb11d91 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/6774f013e21b48d18a8dec9afcb11d91 2024-11-20T22:26:00,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,216 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/6774f013e21b48d18a8dec9afcb11d91, entries=150, sequenceid=160, filesize=11.9 K 2024-11-20T22:26:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,216 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for bacf880704606b04149d74a04b9ceb0e in 1756ms, sequenceid=160, compaction requested=true 2024-11-20T22:26:00,216 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:26:00,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:00,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:26:00,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:26:00,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:26:00,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-20T22:26:00,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,219 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:26:00,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,219 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:26:00,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,220 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 132970 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:26:00,220 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48510 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:26:00,220 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/A is initiating minor compaction (all files) 2024-11-20T22:26:00,220 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] 
regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/C is initiating minor compaction (all files) 2024-11-20T22:26:00,220 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/A in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:00,220 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/C in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:00,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,220 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/f1364c7f7a0f4d618eac1aebc83700d8, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/f2676ea35aff4045b26fde3cbb73128e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/9c9aa8ec01c14b979615280e6f5c2d05, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/6774f013e21b48d18a8dec9afcb11d91] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=47.4 K 2024-11-20T22:26:00,220 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/ae48ef50907540999add71f47a936db5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/a38d15dc680141c3bff9bfcb22f83184, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/50a69b4bad894a4a9fcdda2cada9bad5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/4dfd3b756e9e47e8a64decfea5dc9577] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=129.9 K 2024-11-20T22:26:00,220 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
2024-11-20T22:26:00,220 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/ae48ef50907540999add71f47a936db5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/a38d15dc680141c3bff9bfcb22f83184, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/50a69b4bad894a4a9fcdda2cada9bad5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/4dfd3b756e9e47e8a64decfea5dc9577] 2024-11-20T22:26:00,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,220 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting f1364c7f7a0f4d618eac1aebc83700d8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732141553879 2024-11-20T22:26:00,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,221 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae48ef50907540999add71f47a936db5, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732141553879 2024-11-20T22:26:00,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,221 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting f2676ea35aff4045b26fde3cbb73128e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732141555041 2024-11-20T22:26:00,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,221 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting a38d15dc680141c3bff9bfcb22f83184, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732141555041 2024-11-20T22:26:00,221 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c9aa8ec01c14b979615280e6f5c2d05, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1732141556182 2024-11-20T22:26:00,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,221 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 6774f013e21b48d18a8dec9afcb11d91, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732141558356 2024-11-20T22:26:00,221 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50a69b4bad894a4a9fcdda2cada9bad5, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1732141556182 2024-11-20T22:26:00,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,221 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4dfd3b756e9e47e8a64decfea5dc9577, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732141558352 2024-11-20T22:26:00,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,227 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:00,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,228 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#C#compaction#515 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:00,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,229 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/0a6c07cc6f8f4022ae56b4daaf9aaff5 is 50, key is test_row_0/C:col10/1732141558460/Put/seqid=0 2024-11-20T22:26:00,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,231 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411208df0a0e2dd0a4498aebe56f4aa6e9fad_bacf880704606b04149d74a04b9ceb0e store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:00,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,232 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411208df0a0e2dd0a4498aebe56f4aa6e9fad_bacf880704606b04149d74a04b9ceb0e, store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:00,233 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208df0a0e2dd0a4498aebe56f4aa6e9fad_bacf880704606b04149d74a04b9ceb0e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:00,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742437_1613 (size=12493) 2024-11-20T22:26:00,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,240 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,249 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,252 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,254 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/0a6c07cc6f8f4022ae56b4daaf9aaff5 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/0a6c07cc6f8f4022ae56b4daaf9aaff5 2024-11-20T22:26:00,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,255 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,258 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/C of bacf880704606b04149d74a04b9ceb0e into 0a6c07cc6f8f4022ae56b4daaf9aaff5(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:26:00,258 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:00,258 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/C, priority=12, startTime=1732141560217; duration=0sec 2024-11-20T22:26:00,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,258 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:00,258 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:C 2024-11-20T22:26:00,258 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:26:00,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,260 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48510 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:26:00,260 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/B is initiating minor compaction (all files) 2024-11-20T22:26:00,260 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/B in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
2024-11-20T22:26:00,260 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/2efedca84ead4590b196b328e48a9514, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/4b51c9ad66974c858cf016bc9ba05725, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/dc7c621b0fe34484b61405bf5fe60dc1, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/38a4955662134fdeb6dead271c5e6718] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=47.4 K 2024-11-20T22:26:00,260 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 2efedca84ead4590b196b328e48a9514, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732141553879 2024-11-20T22:26:00,261 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b51c9ad66974c858cf016bc9ba05725, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732141555041 2024-11-20T22:26:00,261 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting dc7c621b0fe34484b61405bf5fe60dc1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1732141556182 2024-11-20T22:26:00,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,261 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 38a4955662134fdeb6dead271c5e6718, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732141558356 2024-11-20T22:26:00,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742438_1614 (size=4469) 2024-11-20T22:26:00,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
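The repeated DEBUG lines from StoreFileTrackerFactory(122) show each request instantiating the default tracker, org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker. The tracker implementation is selectable cluster-wide or per table; the sketch below assumes the hbase.store.file-tracker.impl property name and the DEFAULT/FILE values used by recent HBase releases (verify against your version), and only illustrates where such a setting would go.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class TrackerConfigSketch {
  public static void main(String[] args) {
    // Cluster-wide default, equivalent to an hbase-site.xml entry (assumed property name).
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.store.file-tracker.impl", "DEFAULT"); // what the log above appears to be using

    // Per-table override supplied at table-creation time.
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
        .setValue("hbase.store.file-tracker.impl", "FILE") // e.g. a file-based tracker
        .build();
    System.out.println(desc.getValue("hbase.store.file-tracker.impl"));
  }
}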
2024-11-20T22:26:00,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,272 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#B#compaction#517 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:00,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,273 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/cec73974f8894716817cdf1b76ce9f92 is 50, key is test_row_0/B:col10/1732141558460/Put/seqid=0 2024-11-20T22:26:00,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,274 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742439_1615 (size=12493) 2024-11-20T22:26:00,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,279 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/cec73974f8894716817cdf1b76ce9f92 as 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/cec73974f8894716817cdf1b76ce9f92 2024-11-20T22:26:00,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,282 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,282 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/B of bacf880704606b04149d74a04b9ceb0e into cec73974f8894716817cdf1b76ce9f92(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:00,282 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:00,282 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/B, priority=12, startTime=1732141560217; duration=0sec 2024-11-20T22:26:00,282 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:00,282 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:B 2024-11-20T22:26:00,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,286 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,318 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,322 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,325 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,335 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,337 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,340 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,344 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,347 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,350 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,353 DEBUG 
[... further repeated StoreFileTrackerFactory(122) DEBUG entries omitted, 2024-11-20T22:26:00,353 through 2024-11-20T22:26:00,357 ...]
2024-11-20T22:26:00,357 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950
2024-11-20T22:26:00,357 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164
2024-11-20T22:26:00,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.
2024-11-20T22:26:00,359 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing bacf880704606b04149d74a04b9ceb0e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB
2024-11-20T22:26:00,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=A
2024-11-20T22:26:00,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:26:00,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=B
2024-11-20T22:26:00,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:26:00,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=C
2024-11-20T22:26:00,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
[... interleaved StoreFileTrackerFactory(122) DEBUG entries from the RpcServer handler threads omitted, 2024-11-20T22:26:00,357 through 2024-11-20T22:26:00,366 ...]
2024-11-20T22:26:00,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a1bda10fdb7c4331b1919ca2e075273b_bacf880704606b04149d74a04b9ceb0e is 50, key is test_row_1/A:col10/1732141558473/Put/seqid=0
[... repeated StoreFileTrackerFactory(122) DEBUG entries omitted, 2024-11-20T22:26:00,366 through 2024-11-20T22:26:00,382 ...]
2024-11-20T22:26:00,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742440_1616 (size=9814) 
2024-11-20T22:26:00,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:00,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:26:00,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:00,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,632 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:00,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:00,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141620630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:00,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141620629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:00,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:00,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141620631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:00,663 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#A#compaction#516 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:00,664 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/3a3899d4a7504384a1542e183c812019 is 175, key is test_row_0/A:col10/1732141558460/Put/seqid=0 2024-11-20T22:26:00,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742441_1617 (size=31447) 2024-11-20T22:26:00,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:00,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141620735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:00,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:00,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141620736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:00,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:00,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141620739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:00,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:00,813 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a1bda10fdb7c4331b1919ca2e075273b_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a1bda10fdb7c4331b1919ca2e075273b_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:00,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/2b009c721f7245b7b74c86cfbc12cecf, store: [table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:00,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/2b009c721f7245b7b74c86cfbc12cecf is 175, key is test_row_1/A:col10/1732141558473/Put/seqid=0 2024-11-20T22:26:00,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742442_1618 (size=22461) 2024-11-20T22:26:00,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:00,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141620937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:00,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:00,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141620939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:00,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:00,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141620943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:01,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T22:26:01,070 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/3a3899d4a7504384a1542e183c812019 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/3a3899d4a7504384a1542e183c812019 2024-11-20T22:26:01,073 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/A of bacf880704606b04149d74a04b9ceb0e into 3a3899d4a7504384a1542e183c812019(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:26:01,073 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:01,073 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/A, priority=12, startTime=1732141560217; duration=0sec 2024-11-20T22:26:01,073 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:01,073 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:A 2024-11-20T22:26:01,223 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=174, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/2b009c721f7245b7b74c86cfbc12cecf 2024-11-20T22:26:01,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/e8cd84c4defc4116bbc1382445c3e44a is 50, key is test_row_1/B:col10/1732141558473/Put/seqid=0 2024-11-20T22:26:01,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:01,248 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:01,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141621247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:01,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141621247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:01,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:01,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141621248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:01,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742443_1619 (size=9757) 2024-11-20T22:26:01,708 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/e8cd84c4defc4116bbc1382445c3e44a 2024-11-20T22:26:01,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:01,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141621756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:01,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:01,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141621759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:01,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:01,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141621759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:01,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/c9b7246ebb5f4ffeac6930e47990bf93 is 50, key is test_row_1/C:col10/1732141558473/Put/seqid=0 2024-11-20T22:26:01,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742444_1620 (size=9757) 2024-11-20T22:26:01,843 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/c9b7246ebb5f4ffeac6930e47990bf93 2024-11-20T22:26:01,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/2b009c721f7245b7b74c86cfbc12cecf as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/2b009c721f7245b7b74c86cfbc12cecf 2024-11-20T22:26:01,891 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/2b009c721f7245b7b74c86cfbc12cecf, entries=100, sequenceid=174, filesize=21.9 K 2024-11-20T22:26:01,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/e8cd84c4defc4116bbc1382445c3e44a as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/e8cd84c4defc4116bbc1382445c3e44a 2024-11-20T22:26:01,913 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/e8cd84c4defc4116bbc1382445c3e44a, entries=100, sequenceid=174, filesize=9.5 K 2024-11-20T22:26:01,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/c9b7246ebb5f4ffeac6930e47990bf93 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/c9b7246ebb5f4ffeac6930e47990bf93 2024-11-20T22:26:01,946 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/c9b7246ebb5f4ffeac6930e47990bf93, entries=100, sequenceid=174, filesize=9.5 K 2024-11-20T22:26:01,947 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for bacf880704606b04149d74a04b9ceb0e in 1589ms, sequenceid=174, compaction requested=false 2024-11-20T22:26:01,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:01,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
2024-11-20T22:26:01,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-20T22:26:01,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-11-20T22:26:01,988 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-11-20T22:26:01,988 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.9930 sec 2024-11-20T22:26:02,009 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 3.0350 sec 2024-11-20T22:26:02,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:02,774 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bacf880704606b04149d74a04b9ceb0e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:26:02,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=A 2024-11-20T22:26:02,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:02,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=B 2024-11-20T22:26:02,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:02,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=C 2024-11-20T22:26:02,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:02,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:02,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141622797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:02,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:02,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141622800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:02,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:02,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141622810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:02,821 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120aea08877219540e0abb5c286f1b4b346_bacf880704606b04149d74a04b9ceb0e is 50, key is test_row_0/A:col10/1732141560630/Put/seqid=0 2024-11-20T22:26:02,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742445_1621 (size=17284) 2024-11-20T22:26:02,868 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:02,907 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:02,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141622903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:02,913 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120aea08877219540e0abb5c286f1b4b346_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120aea08877219540e0abb5c286f1b4b346_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:02,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:02,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141622911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:02,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:02,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141622919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:02,930 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/f56e7fbdeb9345e1a8482fef0015284d, store: [table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:02,931 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/f56e7fbdeb9345e1a8482fef0015284d is 175, key is test_row_0/A:col10/1732141560630/Put/seqid=0 2024-11-20T22:26:02,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742446_1622 (size=48389) 2024-11-20T22:26:02,967 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=201, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/f56e7fbdeb9345e1a8482fef0015284d 2024-11-20T22:26:02,989 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/6f1d232380284b0390943aba391fe930 is 50, key is test_row_0/B:col10/1732141560630/Put/seqid=0 2024-11-20T22:26:03,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742447_1623 (size=12151) 2024-11-20T22:26:03,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/6f1d232380284b0390943aba391fe930 2024-11-20T22:26:03,058 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/96110421a6ca48ccb340cc252c8b94a3 is 50, key is test_row_0/C:col10/1732141560630/Put/seqid=0 2024-11-20T22:26:03,069 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T22:26:03,070 INFO [Thread-2535 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-11-20T22:26:03,071 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:26:03,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-11-20T22:26:03,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T22:26:03,073 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:26:03,074 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:26:03,074 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:26:03,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742448_1624 (size=12151) 2024-11-20T22:26:03,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:03,121 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:03,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141623119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:03,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141623119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:03,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:03,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141623127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:03,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T22:26:03,226 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:03,226 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-20T22:26:03,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:03,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:26:03,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:03,227 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:26:03,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:03,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:03,259 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:03,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52068 deadline: 1732141623259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:03,260 DEBUG [Thread-2531 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8205 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., hostname=6365a1e51efd,44631,1732141399950, seqNum=5, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:26:03,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:03,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52094 deadline: 1732141623260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:03,265 DEBUG [Thread-2533 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8209 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., hostname=6365a1e51efd,44631,1732141399950, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:26:03,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T22:26:03,379 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:03,379 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-20T22:26:03,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:03,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:26:03,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:03,379 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:26:03,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:03,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:03,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:03,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141623422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:03,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:03,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141623427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:03,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:03,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141623430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:03,486 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/96110421a6ca48ccb340cc252c8b94a3 2024-11-20T22:26:03,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/f56e7fbdeb9345e1a8482fef0015284d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/f56e7fbdeb9345e1a8482fef0015284d 2024-11-20T22:26:03,499 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/f56e7fbdeb9345e1a8482fef0015284d, entries=250, sequenceid=201, filesize=47.3 K 2024-11-20T22:26:03,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/6f1d232380284b0390943aba391fe930 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/6f1d232380284b0390943aba391fe930 2024-11-20T22:26:03,523 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/6f1d232380284b0390943aba391fe930, entries=150, sequenceid=201, filesize=11.9 K 2024-11-20T22:26:03,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/96110421a6ca48ccb340cc252c8b94a3 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/96110421a6ca48ccb340cc252c8b94a3 2024-11-20T22:26:03,530 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/96110421a6ca48ccb340cc252c8b94a3, entries=150, sequenceid=201, filesize=11.9 K
2024-11-20T22:26:03,531 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950
2024-11-20T22:26:03,532 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166
2024-11-20T22:26:03,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.
2024-11-20T22:26:03,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing
2024-11-20T22:26:03,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.
2024-11-20T22:26:03,532 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for bacf880704606b04149d74a04b9ceb0e in 758ms, sequenceid=201, compaction requested=true
2024-11-20T22:26:03,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bacf880704606b04149d74a04b9ceb0e:
2024-11-20T22:26:03,532 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166
java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T22:26:03,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166
java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T22:26:03,532 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T22:26:03,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=166
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T22:26:03,533 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102297 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T22:26:03,534 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/A is initiating minor compaction (all files)
2024-11-20T22:26:03,534 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/A in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.
2024-11-20T22:26:03,534 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/3a3899d4a7504384a1542e183c812019, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/2b009c721f7245b7b74c86cfbc12cecf, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/f56e7fbdeb9345e1a8482fef0015284d] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=99.9 K
2024-11-20T22:26:03,534 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.
2024-11-20T22:26:03,534 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.
files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/3a3899d4a7504384a1542e183c812019, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/2b009c721f7245b7b74c86cfbc12cecf, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/f56e7fbdeb9345e1a8482fef0015284d] 2024-11-20T22:26:03,534 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a3899d4a7504384a1542e183c812019, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732141558356 2024-11-20T22:26:03,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:26:03,535 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b009c721f7245b7b74c86cfbc12cecf, keycount=100, bloomtype=ROW, size=21.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732141558472 2024-11-20T22:26:03,535 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting f56e7fbdeb9345e1a8482fef0015284d, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732141560611 2024-11-20T22:26:03,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:03,536 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:03,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:26:03,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:03,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:26:03,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:03,536 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:03,537 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/B is initiating minor compaction (all files) 2024-11-20T22:26:03,537 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/B in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
2024-11-20T22:26:03,537 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/cec73974f8894716817cdf1b76ce9f92, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/e8cd84c4defc4116bbc1382445c3e44a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/6f1d232380284b0390943aba391fe930] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=33.6 K 2024-11-20T22:26:03,537 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting cec73974f8894716817cdf1b76ce9f92, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732141558356 2024-11-20T22:26:03,538 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting e8cd84c4defc4116bbc1382445c3e44a, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732141558472 2024-11-20T22:26:03,539 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f1d232380284b0390943aba391fe930, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732141560629 2024-11-20T22:26:03,545 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:03,555 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112020fc298ff2f740c2b17d68aa8f5b708f_bacf880704606b04149d74a04b9ceb0e store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:03,557 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112020fc298ff2f740c2b17d68aa8f5b708f_bacf880704606b04149d74a04b9ceb0e, store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:03,557 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112020fc298ff2f740c2b17d68aa8f5b708f_bacf880704606b04149d74a04b9ceb0e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:03,559 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#B#compaction#525 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:03,560 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/5e608102d6e84713809ac219f1a94123 is 50, key is test_row_0/B:col10/1732141560630/Put/seqid=0 2024-11-20T22:26:03,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742449_1625 (size=4469) 2024-11-20T22:26:03,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742450_1626 (size=12595) 2024-11-20T22:26:03,654 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/5e608102d6e84713809ac219f1a94123 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/5e608102d6e84713809ac219f1a94123 2024-11-20T22:26:03,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T22:26:03,685 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:03,685 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/B of bacf880704606b04149d74a04b9ceb0e into 5e608102d6e84713809ac219f1a94123(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:03,685 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:03,685 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-20T22:26:03,685 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/B, priority=13, startTime=1732141563536; duration=0sec 2024-11-20T22:26:03,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
2024-11-20T22:26:03,685 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:03,685 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:B 2024-11-20T22:26:03,685 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing bacf880704606b04149d74a04b9ceb0e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:26:03,685 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:03,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=A 2024-11-20T22:26:03,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:03,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=B 2024-11-20T22:26:03,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:03,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=C 2024-11-20T22:26:03,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:03,687 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:03,687 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/C is initiating minor compaction (all files) 2024-11-20T22:26:03,687 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/C in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
2024-11-20T22:26:03,687 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/0a6c07cc6f8f4022ae56b4daaf9aaff5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/c9b7246ebb5f4ffeac6930e47990bf93, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/96110421a6ca48ccb340cc252c8b94a3] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=33.6 K 2024-11-20T22:26:03,688 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a6c07cc6f8f4022ae56b4daaf9aaff5, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732141558356 2024-11-20T22:26:03,691 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting c9b7246ebb5f4ffeac6930e47990bf93, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732141558472 2024-11-20T22:26:03,693 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 96110421a6ca48ccb340cc252c8b94a3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732141560629 2024-11-20T22:26:03,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b371495a314348e3a96e6c4237549db2_bacf880704606b04149d74a04b9ceb0e is 50, key is test_row_0/A:col10/1732141562793/Put/seqid=0 2024-11-20T22:26:03,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742451_1627 (size=12304) 2024-11-20T22:26:03,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,731 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b371495a314348e3a96e6c4237549db2_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b371495a314348e3a96e6c4237549db2_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:03,733 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#C#compaction#527 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:03,733 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/b16db0f1de5f4168a3a04592deebcff7 is 50, key is test_row_0/C:col10/1732141560630/Put/seqid=0 2024-11-20T22:26:03,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/b608ec31b63549b3acc1f74dcefdc954, store: [table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:03,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/b608ec31b63549b3acc1f74dcefdc954 is 175, key is test_row_0/A:col10/1732141562793/Put/seqid=0 2024-11-20T22:26:03,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742452_1628 (size=12595) 2024-11-20T22:26:03,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742453_1629 (size=31105) 2024-11-20T22:26:03,763 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=213, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/b608ec31b63549b3acc1f74dcefdc954 2024-11-20T22:26:03,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/8996ba7ae2f74acc932b5f585b3e1533 is 50, key is test_row_0/B:col10/1732141562793/Put/seqid=0 2024-11-20T22:26:03,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742454_1630 (size=12151) 2024-11-20T22:26:03,799 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/8996ba7ae2f74acc932b5f585b3e1533 2024-11-20T22:26:03,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/e976953aabce4d1b8d9318622d1ccd22 is 50, key is test_row_0/C:col10/1732141562793/Put/seqid=0 
2024-11-20T22:26:03,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742455_1631 (size=12151) 2024-11-20T22:26:03,811 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/e976953aabce4d1b8d9318622d1ccd22 2024-11-20T22:26:03,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/b608ec31b63549b3acc1f74dcefdc954 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/b608ec31b63549b3acc1f74dcefdc954 2024-11-20T22:26:03,833 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/b608ec31b63549b3acc1f74dcefdc954, entries=150, sequenceid=213, filesize=30.4 K 2024-11-20T22:26:03,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/8996ba7ae2f74acc932b5f585b3e1533 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/8996ba7ae2f74acc932b5f585b3e1533 2024-11-20T22:26:03,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,874 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/8996ba7ae2f74acc932b5f585b3e1533, entries=150, sequenceid=213, filesize=11.9 K 2024-11-20T22:26:03,874 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,879 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,882 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/e976953aabce4d1b8d9318622d1ccd22 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/e976953aabce4d1b8d9318622d1ccd22 2024-11-20T22:26:03,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:03,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:03,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,890 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/e976953aabce4d1b8d9318622d1ccd22, entries=150, sequenceid=213, filesize=11.9 K 2024-11-20T22:26:03,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,891 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for bacf880704606b04149d74a04b9ceb0e in 205ms, sequenceid=213, compaction requested=false 2024-11-20T22:26:03,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:03,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
2024-11-20T22:26:03,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-20T22:26:03,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-11-20T22:26:03,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,900 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-11-20T22:26:03,900 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 824 msec 2024-11-20T22:26:03,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,901 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 829 msec 2024-11-20T22:26:03,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
2024-11-20T22:26:03,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,991 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#A#compaction#524 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:03,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,992 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/57f014a1c94e4ecea4105004254eb7a0 is 175, key is test_row_0/A:col10/1732141560630/Put/seqid=0 2024-11-20T22:26:03,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:03,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:04,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:04,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:04,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:04,001 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bacf880704606b04149d74a04b9ceb0e 3/3 column families, dataSize=53.67 KB 
heapSize=141.38 KB 2024-11-20T22:26:04,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=A 2024-11-20T22:26:04,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:04,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=B 2024-11-20T22:26:04,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:04,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=C 2024-11-20T22:26:04,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:04,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:04,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:04,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:04,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:04,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:04,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:04,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:04,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:04,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742456_1632 (size=31549) 2024-11-20T22:26:04,019 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/57f014a1c94e4ecea4105004254eb7a0 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/57f014a1c94e4ecea4105004254eb7a0 2024-11-20T22:26:04,029 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112091e1e438bf8740b6a8b24a3eca43799e_bacf880704606b04149d74a04b9ceb0e is 50, key is test_row_0/A:col10/1732141563997/Put/seqid=0 2024-11-20T22:26:04,033 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/A of bacf880704606b04149d74a04b9ceb0e into 57f014a1c94e4ecea4105004254eb7a0(size=30.8 K), total size for store is 61.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:04,033 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:04,033 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/A, priority=13, startTime=1732141563532; duration=0sec 2024-11-20T22:26:04,033 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:04,033 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:A 2024-11-20T22:26:04,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742458_1634 (size=24758) 2024-11-20T22:26:04,068 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:04,087 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112091e1e438bf8740b6a8b24a3eca43799e_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112091e1e438bf8740b6a8b24a3eca43799e_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:04,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:04,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141624090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:04,095 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/0c6fd2b8acbe40b6b8d163171b8582af, store: [table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:04,099 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:04,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141624095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:04,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:04,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141624099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:04,104 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/0c6fd2b8acbe40b6b8d163171b8582af is 175, key is test_row_0/A:col10/1732141563997/Put/seqid=0 2024-11-20T22:26:04,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742457_1633 (size=74395) 2024-11-20T22:26:04,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T22:26:04,183 INFO [Thread-2535 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-20T22:26:04,188 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:26:04,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-11-20T22:26:04,190 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:26:04,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T22:26:04,193 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=167, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:26:04,193 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:26:04,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:04,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141624199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:04,204 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:04,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141624200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:04,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:04,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141624215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:04,218 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/b16db0f1de5f4168a3a04592deebcff7 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/b16db0f1de5f4168a3a04592deebcff7 2024-11-20T22:26:04,239 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/C of bacf880704606b04149d74a04b9ceb0e into b16db0f1de5f4168a3a04592deebcff7(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:04,239 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:04,239 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/C, priority=13, startTime=1732141563536; duration=0sec 2024-11-20T22:26:04,239 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:04,239 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:C 2024-11-20T22:26:04,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T22:26:04,353 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:04,354 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T22:26:04,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
2024-11-20T22:26:04,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:26:04,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:04,354 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:04,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:26:04,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:04,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:04,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141624404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:04,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:04,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141624405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:04,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:04,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141624422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:04,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T22:26:04,506 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:04,506 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T22:26:04,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:04,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:26:04,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:04,507 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:04,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:04,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:04,508 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=224, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/0c6fd2b8acbe40b6b8d163171b8582af 2024-11-20T22:26:04,513 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/effb1230864e4465b68a76cd4943407e is 50, key is test_row_0/B:col10/1732141563997/Put/seqid=0 2024-11-20T22:26:04,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742459_1635 (size=12151) 2024-11-20T22:26:04,669 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:04,669 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T22:26:04,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:04,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:26:04,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:04,670 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:04,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:04,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:04,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:04,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141624706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:04,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:04,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141624708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:04,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:04,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141624726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:04,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T22:26:04,823 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:04,827 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T22:26:04,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:04,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:26:04,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:04,827 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:04,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:04,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:04,923 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=224 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/effb1230864e4465b68a76cd4943407e 2024-11-20T22:26:04,935 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/ce3eba096fb64137a85ae1788a674277 is 50, key is test_row_0/C:col10/1732141563997/Put/seqid=0 2024-11-20T22:26:04,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742460_1636 (size=12151) 2024-11-20T22:26:04,992 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:04,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T22:26:04,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:04,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:26:04,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:04,992 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:04,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:04,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:05,147 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:05,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T22:26:05,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:05,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:26:05,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:05,148 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:05,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:05,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:05,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:05,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141625210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:05,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:05,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141625214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:05,230 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:05,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141625229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:05,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T22:26:05,300 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:05,300 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T22:26:05,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:05,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:26:05,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:05,301 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:05,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:05,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:05,352 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=224 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/ce3eba096fb64137a85ae1788a674277 2024-11-20T22:26:05,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/0c6fd2b8acbe40b6b8d163171b8582af as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/0c6fd2b8acbe40b6b8d163171b8582af 2024-11-20T22:26:05,357 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/0c6fd2b8acbe40b6b8d163171b8582af, entries=400, sequenceid=224, filesize=72.7 K 2024-11-20T22:26:05,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/effb1230864e4465b68a76cd4943407e as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/effb1230864e4465b68a76cd4943407e 2024-11-20T22:26:05,360 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/effb1230864e4465b68a76cd4943407e, entries=150, sequenceid=224, filesize=11.9 K 2024-11-20T22:26:05,360 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/ce3eba096fb64137a85ae1788a674277 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/ce3eba096fb64137a85ae1788a674277 2024-11-20T22:26:05,364 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/ce3eba096fb64137a85ae1788a674277, entries=150, sequenceid=224, filesize=11.9 K 2024-11-20T22:26:05,365 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for bacf880704606b04149d74a04b9ceb0e in 1363ms, sequenceid=224, compaction requested=true 2024-11-20T22:26:05,365 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:05,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:26:05,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:05,365 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:05,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:26:05,365 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:05,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:05,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:26:05,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:05,369 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 137049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:05,369 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:05,369 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/B is initiating minor compaction (all files) 2024-11-20T22:26:05,369 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/A is initiating minor compaction (all files) 2024-11-20T22:26:05,370 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/A in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:05,370 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/B in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
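The RegionTooBusyException warnings above come from HRegion.checkResources(), which refuses new mutations while the region's memstore is over its blocking limit (512 K here, which points at a deliberately small test flush size, since the blocking limit is the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier). The CallRunner records show the exception being returned to the Mutate callers, which are expected to back off and retry while the in-flight flush drains the memstore. A minimal client-side sketch of that pattern, assuming the stock HBase Java client and reusing the table, row, family, and qualifier names from these records (the class name and the explicit retry loop are illustrative, not a description of how this test's writers are implemented; the stock client also retries internally, so the exception may surface only after those retries are exhausted):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative backoff-and-retry around the RegionTooBusyException seen above.
    // Table, row, family and qualifier names are taken from the log; the loop itself is a sketch.
    public class BackoffPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
          long backoffMs = 100;
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put); // rejected server-side while the memstore is over its blocking limit
              return;
            } catch (RegionTooBusyException e) {
              // Give the in-flight flush (the MemStoreFlusher records above) time to drain the
              // memstore before retrying; the stock client may also deliver this wrapped after
              // its own internal retries rather than directly as shown here.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }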
2024-11-20T22:26:05,370 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/57f014a1c94e4ecea4105004254eb7a0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/b608ec31b63549b3acc1f74dcefdc954, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/0c6fd2b8acbe40b6b8d163171b8582af] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=133.8 K 2024-11-20T22:26:05,370 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/5e608102d6e84713809ac219f1a94123, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/8996ba7ae2f74acc932b5f585b3e1533, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/effb1230864e4465b68a76cd4943407e] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=36.0 K 2024-11-20T22:26:05,370 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:05,370 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/57f014a1c94e4ecea4105004254eb7a0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/b608ec31b63549b3acc1f74dcefdc954, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/0c6fd2b8acbe40b6b8d163171b8582af] 2024-11-20T22:26:05,370 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e608102d6e84713809ac219f1a94123, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732141560629 2024-11-20T22:26:05,370 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57f014a1c94e4ecea4105004254eb7a0, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732141560629 2024-11-20T22:26:05,370 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 8996ba7ae2f74acc932b5f585b3e1533, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732141562780 2024-11-20T22:26:05,370 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting effb1230864e4465b68a76cd4943407e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1732141563977 2024-11-20T22:26:05,370 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting b608ec31b63549b3acc1f74dcefdc954, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732141562780 2024-11-20T22:26:05,371 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c6fd2b8acbe40b6b8d163171b8582af, keycount=400, bloomtype=ROW, size=72.7 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1732141563975 2024-11-20T22:26:05,376 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:05,377 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#B#compaction#533 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:05,378 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/ee61610e93864c2db2420e53d554ce04 is 50, key is test_row_0/B:col10/1732141563997/Put/seqid=0 2024-11-20T22:26:05,378 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411202b0fe0be586948ea819a5f56ab7b9cf1_bacf880704606b04149d74a04b9ceb0e store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:05,386 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411202b0fe0be586948ea819a5f56ab7b9cf1_bacf880704606b04149d74a04b9ceb0e, store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:05,386 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202b0fe0be586948ea819a5f56ab7b9cf1_bacf880704606b04149d74a04b9ceb0e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:05,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742462_1638 (size=4469) 2024-11-20T22:26:05,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742461_1637 (size=12697) 2024-11-20T22:26:05,452 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:05,453 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T22:26:05,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
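The pid=167/pid=168 records in this stretch are a master-side flush procedure and its remote callable: the master repeatedly dispatches FlushRegionCallable to 6365a1e51efd,44631, the region server answers "NOT flushing ... as already flushing" while the previous flush is still running, the callable fails with the IOException shown, and the master re-submits it until the region finally accepts the flush at 22:26:05,453 below. One plausible way such a flush is requested, sketched with standard Admin API calls (whether this test issues the flush exactly this way is an assumption; the class name is illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Illustrative trigger for a table flush like the one tracked as pid=167/168 above.
    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the cluster to flush every region of the table. In the records above this runs as
          // a master procedure that dispatches FlushRegionCallable (event_type=RS_FLUSH_REGIONS)
          // to the region server and re-submits it while the region reports "already flushing".
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }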
2024-11-20T22:26:05,453 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing bacf880704606b04149d74a04b9ceb0e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB
2024-11-20T22:26:05,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=A
2024-11-20T22:26:05,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:26:05,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=B
2024-11-20T22:26:05,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:26:05,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=C
2024-11-20T22:26:05,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:26:05,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d960475949234fe28285d668748bd115_bacf880704606b04149d74a04b9ceb0e is 50, key is test_row_0/A:col10/1732141564087/Put/seqid=0
2024-11-20T22:26:05,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742463_1639 (size=12304)
2024-11-20T22:26:05,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:26:05,532 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d960475949234fe28285d668748bd115_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d960475949234fe28285d668748bd115_bacf880704606b04149d74a04b9ceb0e
2024-11-20T22:26:05,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/81d607a652e84581b6b765264153a0d5, store: [table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e]
2024-11-20T22:26:05,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/81d607a652e84581b6b765264153a0d5 is 175, key is test_row_0/A:col10/1732141564087/Put/seqid=0
2024-11-20T22:26:05,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742464_1640 (size=31105)
2024-11-20T22:26:05,552 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=251, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/81d607a652e84581b6b765264153a0d5
2024-11-20T22:26:05,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/32f18a1deb0341f98ac0505dd25311a6 is 50, key is test_row_0/B:col10/1732141564087/Put/seqid=0
2024-11-20T22:26:05,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742465_1641 (size=12151)
2024-11-20T22:26:05,565 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/32f18a1deb0341f98ac0505dd25311a6
2024-11-20T22:26:05,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/a48de99023ff4f058835fb6cdd46e708 is 50, key is test_row_0/C:col10/1732141564087/Put/seqid=0
2024-11-20T22:26:05,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742466_1642 (size=12151)
2024-11-20T22:26:05,822 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#A#compaction#534 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T22:26:05,823 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/202ff77599274b879eba04ef3786becf is 175, key is test_row_0/A:col10/1732141563997/Put/seqid=0
2024-11-20T22:26:05,855 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/ee61610e93864c2db2420e53d554ce04 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/ee61610e93864c2db2420e53d554ce04
2024-11-20T22:26:05,868 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/B of bacf880704606b04149d74a04b9ceb0e into ee61610e93864c2db2420e53d554ce04(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T22:26:05,868 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e:
2024-11-20T22:26:05,868 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/B, priority=13, startTime=1732141565365; duration=0sec
2024-11-20T22:26:05,868 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-20T22:26:05,868 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:B
2024-11-20T22:26:05,868 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T22:26:05,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742467_1643 (size=31651)
2024-11-20T22:26:05,870 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T22:26:05,870 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/C is initiating minor compaction (all files)
2024-11-20T22:26:05,870 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/C in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.
2024-11-20T22:26:05,870 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/b16db0f1de5f4168a3a04592deebcff7, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/e976953aabce4d1b8d9318622d1ccd22, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/ce3eba096fb64137a85ae1788a674277] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=36.0 K
2024-11-20T22:26:05,870 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting b16db0f1de5f4168a3a04592deebcff7, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732141560629
2024-11-20T22:26:05,871 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting e976953aabce4d1b8d9318622d1ccd22, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732141562780
2024-11-20T22:26:05,871 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting ce3eba096fb64137a85ae1788a674277, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1732141563977
2024-11-20T22:26:05,900 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#C#compaction#538 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T22:26:05,901 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/5be380a7c9804f7b97c01e746aaf202d is 50, key is test_row_0/C:col10/1732141563997/Put/seqid=0
2024-11-20T22:26:05,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742468_1644 (size=12697)
2024-11-20T22:26:05,940 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/5be380a7c9804f7b97c01e746aaf202d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/5be380a7c9804f7b97c01e746aaf202d
2024-11-20T22:26:05,947 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/C of bacf880704606b04149d74a04b9ceb0e into 5be380a7c9804f7b97c01e746aaf202d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T22:26:05,947 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e:
2024-11-20T22:26:05,948 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/C, priority=13, startTime=1732141565365; duration=0sec
2024-11-20T22:26:05,948 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T22:26:05,948 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:C
2024-11-20T22:26:06,009 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/a48de99023ff4f058835fb6cdd46e708
2024-11-20T22:26:06,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/81d607a652e84581b6b765264153a0d5 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/81d607a652e84581b6b765264153a0d5
2024-11-20T22:26:06,024 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/81d607a652e84581b6b765264153a0d5, entries=150, sequenceid=251, filesize=30.4 K
2024-11-20T22:26:06,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/32f18a1deb0341f98ac0505dd25311a6 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/32f18a1deb0341f98ac0505dd25311a6
2024-11-20T22:26:06,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:26:06,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:26:06,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:26:06,032 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/32f18a1deb0341f98ac0505dd25311a6, entries=150, sequenceid=251, filesize=11.9 K
2024-11-20T22:26:06,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/a48de99023ff4f058835fb6cdd46e708 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/a48de99023ff4f058835fb6cdd46e708
2024-11-20T22:26:06,044 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/a48de99023ff4f058835fb6cdd46e708, entries=150, sequenceid=251, filesize=11.9 K
2024-11-20T22:26:06,044 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for bacf880704606b04149d74a04b9ceb0e in 591ms, sequenceid=251, compaction requested=false
2024-11-20T22:26:06,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for bacf880704606b04149d74a04b9ceb0e:
2024-11-20T22:26:06,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.
2024-11-20T22:26:06,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168
2024-11-20T22:26:06,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=168
2024-11-20T22:26:06,051 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167
2024-11-20T22:26:06,051 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8540 sec
2024-11-20T22:26:06,052 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 1.8640 sec
2024-11-20T22:26:06,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical DEBUG record from storefiletracker.StoreFileTrackerFactory(122) — "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" — repeated by RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=44631) between 2024-11-20T22:26:06,188 and 2024-11-20T22:26:06,267 ...]
2024-11-20T22:26:06,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,275 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/202ff77599274b879eba04ef3786becf as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/202ff77599274b879eba04ef3786becf 2024-11-20T22:26:06,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,279 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/A of bacf880704606b04149d74a04b9ceb0e into 202ff77599274b879eba04ef3786becf(size=30.9 K), total size for store is 61.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:06,279 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:06,279 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/A, priority=13, startTime=1732141565365; duration=0sec 2024-11-20T22:26:06,279 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:06,279 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:A 2024-11-20T22:26:06,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,282 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bacf880704606b04149d74a04b9ceb0e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:26:06,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=A 2024-11-20T22:26:06,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:06,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=B 2024-11-20T22:26:06,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:06,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=C 2024-11-20T22:26:06,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:06,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:06,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,287 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,292 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T22:26:06,296 INFO [Thread-2535 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-11-20T22:26:06,299 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:26:06,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees 2024-11-20T22:26:06,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,300 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,301 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:26:06,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T22:26:06,301 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201642dfa70c8f4ed79d360d158327ad0c_bacf880704606b04149d74a04b9ceb0e is 50, key is test_row_0/A:col10/1732141566281/Put/seqid=0 2024-11-20T22:26:06,301 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:26:06,301 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:26:06,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
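The records above show a client-requested flush: the master receives "flush TestAcidGuarantees" from the jenkins client, stores a FlushTableProcedure (pid=169), and fans it out into a FlushRegionProcedure subprocedure (pid=170) while HBaseAdmin polls "Checking to see if procedure is done". A minimal client-side sketch of the call that produces this path (connection details are assumed, not taken from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // The master turns this into a FlushTableProcedure with one
          // FlushRegionProcedure per region, as seen in the surrounding records.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }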
2024-11-20T22:26:06,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:06,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
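The repeated "instantiating StoreFileTracker impl ... DefaultStoreFileTracker" entries come from StoreFileTrackerFactory creating a tracker per store-level request; DefaultStoreFileTracker is used when no other tracker is configured and keeps the legacy behaviour of tracking store files via the store directory listing. A sketch of how an alternative tracker would be selected, where the configuration key and the FILE value are assumptions and are not shown anywhere in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileTrackerConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key/value: selects a file-based tracker instead of the
        // default directory-listing tracker logged above.
        conf.set("hbase.store.file-tracker.impl", "FILE");
      }
    }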
2024-11-20T22:26:06,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
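The RegionTooBusyException entries that follow ("Over memstore limit=512.0 K") mean the region's memstore has grown past its blocking threshold, which in general is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; writes are rejected until the in-flight flush drains the memstore. The values below are illustrative assumptions that would yield a 512 K limit; the test's actual settings are not visible in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed: 128 K flush size * block multiplier 4 = 512 K blocking limit.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
      }
    }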
2024-11-20T22:26:06,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742469_1645 (size=12454) 2024-11-20T22:26:06,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:06,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141626346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:06,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:06,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141626347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:06,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:06,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141626348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:06,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T22:26:06,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:06,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141626449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:06,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:06,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141626450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:06,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:06,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141626451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:06,457 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:06,458 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T22:26:06,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:06,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:26:06,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:06,458 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:06,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:06,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:06,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T22:26:06,610 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:06,610 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T22:26:06,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:06,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:26:06,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:06,610 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
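The repeated "Over memstore limit=512.0 K" rejections above are raised by HRegion.checkResources(), which blocks new mutations once the region's memstore grows past its blocking threshold (the configured flush size times hbase.hregion.memstore.block.multiplier); the exception is retriable, and callers are expected to back off until the in-flight flush (pid=170 here) drains the memstore. As a hedged illustration only, with table, family, row, and backoff values assumed rather than taken from this test, a client-side retry loop could look like the Java sketch below; note that the stock HBase client already performs this kind of retry internally, so the explicit catch is purely to make the backoff visible.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged sketch: back off and retry a put when the region rejects writes with
// RegionTooBusyException ("Over memstore limit"), as seen in this log.
// Table/family/row names and backoff values are illustrative assumptions.
public class TooBusyRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;                          // assumed starting backoff
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put);                            // the client normally retries this internally
          return;                                    // write accepted
        } catch (RegionTooBusyException e) {
          // Memstore is above its blocking limit; wait for the flush to drain it.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
      throw new IOException("region still too busy after retries");
    }
  }
}

The flush that eventually relieves the pressure completes further down in this log ("Finished flush of dataSize ~53.67 KB ... in 1013ms, sequenceid=265").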
2024-11-20T22:26:06,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:06,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:06,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:06,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141626652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:06,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:06,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141626653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:06,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:06,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141626654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:06,730 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,745 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201642dfa70c8f4ed79d360d158327ad0c_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201642dfa70c8f4ed79d360d158327ad0c_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:06,746 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/1b46b3e4cae44aba879cd10c7ffffeaf, store: [table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:06,747 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/1b46b3e4cae44aba879cd10c7ffffeaf is 175, key is test_row_0/A:col10/1732141566281/Put/seqid=0 2024-11-20T22:26:06,762 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:06,763 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T22:26:06,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:06,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
as already flushing 2024-11-20T22:26:06,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:06,763 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:06,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:06,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:06,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742470_1646 (size=31251) 2024-11-20T22:26:06,783 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=265, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/1b46b3e4cae44aba879cd10c7ffffeaf 2024-11-20T22:26:06,797 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/0681f1f346c84ac0aeb69eff63d926a7 is 50, key is test_row_0/B:col10/1732141566281/Put/seqid=0 2024-11-20T22:26:06,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742471_1647 (size=9857) 2024-11-20T22:26:06,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T22:26:06,915 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:06,919 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T22:26:06,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:06,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:26:06,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:06,921 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:06,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:26:06,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:06,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:06,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141626956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:06,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:06,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141626963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:06,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:06,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141626963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:07,074 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:07,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T22:26:07,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:07,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:26:07,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:07,074 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:07,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:07,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:07,216 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/0681f1f346c84ac0aeb69eff63d926a7 2024-11-20T22:26:07,226 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:07,227 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T22:26:07,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:07,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:26:07,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:07,227 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:26:07,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:07,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:07,232 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/024824ca5b714084af649f14a2ca596a is 50, key is test_row_0/C:col10/1732141566281/Put/seqid=0 2024-11-20T22:26:07,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742472_1648 (size=9857) 2024-11-20T22:26:07,272 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/024824ca5b714084af649f14a2ca596a 2024-11-20T22:26:07,277 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/1b46b3e4cae44aba879cd10c7ffffeaf as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/1b46b3e4cae44aba879cd10c7ffffeaf 2024-11-20T22:26:07,282 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/1b46b3e4cae44aba879cd10c7ffffeaf, entries=150, sequenceid=265, filesize=30.5 K 2024-11-20T22:26:07,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/0681f1f346c84ac0aeb69eff63d926a7 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/0681f1f346c84ac0aeb69eff63d926a7 2024-11-20T22:26:07,288 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/0681f1f346c84ac0aeb69eff63d926a7, entries=100, sequenceid=265, filesize=9.6 K 2024-11-20T22:26:07,289 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/024824ca5b714084af649f14a2ca596a as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/024824ca5b714084af649f14a2ca596a 2024-11-20T22:26:07,293 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/024824ca5b714084af649f14a2ca596a, entries=100, sequenceid=265, filesize=9.6 K 2024-11-20T22:26:07,296 
INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for bacf880704606b04149d74a04b9ceb0e in 1013ms, sequenceid=265, compaction requested=true 2024-11-20T22:26:07,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:07,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:26:07,296 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:07,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:07,296 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:07,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:26:07,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:07,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:26:07,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:07,303 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94007 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:07,303 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/A is initiating minor compaction (all files) 2024-11-20T22:26:07,303 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/A in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
2024-11-20T22:26:07,303 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/202ff77599274b879eba04ef3786becf, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/81d607a652e84581b6b765264153a0d5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/1b46b3e4cae44aba879cd10c7ffffeaf] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=91.8 K 2024-11-20T22:26:07,303 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:07,303 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/202ff77599274b879eba04ef3786becf, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/81d607a652e84581b6b765264153a0d5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/1b46b3e4cae44aba879cd10c7ffffeaf] 2024-11-20T22:26:07,305 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 202ff77599274b879eba04ef3786becf, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1732141563977 2024-11-20T22:26:07,305 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34705 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:07,305 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/B is initiating minor compaction (all files) 2024-11-20T22:26:07,305 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/B in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
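The selection logged above ("Selecting compaction from 3 store files", "Exploring compaction algorithm has selected 3 files of size 94007 ... 1 permutations with 1 in ratio") comes from the exploring policy walking candidate windows of store files and keeping only windows that are "in ratio", i.e. where no single file dwarfs the rest. The Java sketch below is a simplified illustration of that ratio test, not the exact HBase implementation; the ratio and file-count bounds are assumed defaults (hbase.hstore.compaction.ratio = 1.2, min 3, max 10), not values read from this test's configuration.

import java.util.List;

// Simplified illustration of the "files in ratio" test applied while exploring
// candidate compaction selections. Thresholds are assumed defaults, not values
// taken from this test run.
public final class CompactionRatioSketch {
  private static final double RATIO = 1.2;   // assumed hbase.hstore.compaction.ratio
  private static final int MIN_FILES = 3;    // assumed hbase.hstore.compaction.min
  private static final int MAX_FILES = 10;   // assumed hbase.hstore.compaction.max

  // A selection qualifies if no file is larger than RATIO times the sum of the others.
  static boolean filesInRatio(List<Long> fileSizes) {
    if (fileSizes.size() < MIN_FILES || fileSizes.size() > MAX_FILES) {
      return false;
    }
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * RATIO) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Three files of comparable size, like the ~30 K A-family HFiles selected above.
    System.out.println(filesInRatio(List.of(31_000L, 31_500L, 31_800L)));      // true
    // One file dominating the others would be rejected by the ratio test.
    System.out.println(filesInRatio(List.of(1_000_000L, 31_500L, 31_800L)));   // false
  }
}

Since the three A-family files here are of comparable size (30.9 K, 30.4 K, 30.5 K), the single candidate window passes such a test, which matches the "1 permutations with 1 in ratio" report.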
2024-11-20T22:26:07,305 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/ee61610e93864c2db2420e53d554ce04, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/32f18a1deb0341f98ac0505dd25311a6, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/0681f1f346c84ac0aeb69eff63d926a7] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=33.9 K 2024-11-20T22:26:07,306 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 81d607a652e84581b6b765264153a0d5, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732141564086 2024-11-20T22:26:07,306 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting ee61610e93864c2db2420e53d554ce04, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1732141563977 2024-11-20T22:26:07,306 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b46b3e4cae44aba879cd10c7ffffeaf, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1732141566255 2024-11-20T22:26:07,306 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 32f18a1deb0341f98ac0505dd25311a6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732141564086 2024-11-20T22:26:07,307 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 0681f1f346c84ac0aeb69eff63d926a7, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1732141566277 2024-11-20T22:26:07,316 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:07,317 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#B#compaction#542 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:07,317 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/533c1432a03f44dc866a76b869999c6d is 50, key is test_row_0/B:col10/1732141566281/Put/seqid=0 2024-11-20T22:26:07,320 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120dad02128d40d4004bc1ec8c7567a3d6f_bacf880704606b04149d74a04b9ceb0e store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:07,322 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120dad02128d40d4004bc1ec8c7567a3d6f_bacf880704606b04149d74a04b9ceb0e, store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:07,322 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120dad02128d40d4004bc1ec8c7567a3d6f_bacf880704606b04149d74a04b9ceb0e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:07,379 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:07,380 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T22:26:07,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
2024-11-20T22:26:07,380 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing bacf880704606b04149d74a04b9ceb0e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:26:07,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=A 2024-11-20T22:26:07,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:07,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=B 2024-11-20T22:26:07,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:07,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=C 2024-11-20T22:26:07,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:07,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742474_1650 (size=4469) 2024-11-20T22:26:07,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742473_1649 (size=12899) 2024-11-20T22:26:07,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T22:26:07,405 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/533c1432a03f44dc866a76b869999c6d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/533c1432a03f44dc866a76b869999c6d 2024-11-20T22:26:07,412 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/B of bacf880704606b04149d74a04b9ceb0e into 533c1432a03f44dc866a76b869999c6d(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:26:07,412 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:07,412 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/B, priority=13, startTime=1732141567296; duration=0sec 2024-11-20T22:26:07,413 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:07,413 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:B 2024-11-20T22:26:07,413 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:07,415 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34705 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:07,415 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/C is initiating minor compaction (all files) 2024-11-20T22:26:07,415 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/C in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:07,415 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/5be380a7c9804f7b97c01e746aaf202d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/a48de99023ff4f058835fb6cdd46e708, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/024824ca5b714084af649f14a2ca596a] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=33.9 K 2024-11-20T22:26:07,415 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 5be380a7c9804f7b97c01e746aaf202d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1732141563977 2024-11-20T22:26:07,416 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting a48de99023ff4f058835fb6cdd46e708, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732141564086 2024-11-20T22:26:07,416 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 024824ca5b714084af649f14a2ca596a, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1732141566277 2024-11-20T22:26:07,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e89d5dc0f855460ab28ce89654dba2e3_bacf880704606b04149d74a04b9ceb0e is 50, key is test_row_0/A:col10/1732141566347/Put/seqid=0 2024-11-20T22:26:07,456 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#C#compaction#545 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:07,457 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/a7790fca00c441e995d66a279d454092 is 50, key is test_row_0/C:col10/1732141566281/Put/seqid=0 2024-11-20T22:26:07,466 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:26:07,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:07,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742475_1651 (size=12454) 2024-11-20T22:26:07,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:07,482 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e89d5dc0f855460ab28ce89654dba2e3_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e89d5dc0f855460ab28ce89654dba2e3_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:07,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/fccce670b0aa47cd89ecbeee3a964508, store: [table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:07,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/fccce670b0aa47cd89ecbeee3a964508 is 175, key is test_row_0/A:col10/1732141566347/Put/seqid=0 2024-11-20T22:26:07,493 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141627486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:07,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141627490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:07,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141627493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:07,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742476_1652 (size=12899) 2024-11-20T22:26:07,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742477_1653 (size=31255) 2024-11-20T22:26:07,548 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=290, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/fccce670b0aa47cd89ecbeee3a964508 2024-11-20T22:26:07,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/2274906e86a147e391367d2a2f188dc0 is 50, key is test_row_0/B:col10/1732141566347/Put/seqid=0 2024-11-20T22:26:07,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742478_1654 (size=12301) 2024-11-20T22:26:07,567 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/2274906e86a147e391367d2a2f188dc0 2024-11-20T22:26:07,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/b2a05953115e4d91b528d8cf974fb108 is 50, key is test_row_0/C:col10/1732141566347/Put/seqid=0 2024-11-20T22:26:07,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141627595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:07,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141627602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:07,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141627604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:07,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742479_1655 (size=12301) 2024-11-20T22:26:07,782 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#A#compaction#543 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:07,783 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/822a5d8a50394c92b94b33672b4163ba is 175, key is test_row_0/A:col10/1732141566281/Put/seqid=0 2024-11-20T22:26:07,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742480_1656 (size=31960) 2024-11-20T22:26:07,799 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/822a5d8a50394c92b94b33672b4163ba as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/822a5d8a50394c92b94b33672b4163ba 2024-11-20T22:26:07,804 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/A of bacf880704606b04149d74a04b9ceb0e into 822a5d8a50394c92b94b33672b4163ba(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:07,804 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:07,804 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/A, priority=13, startTime=1732141567296; duration=0sec 2024-11-20T22:26:07,804 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:07,804 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:A 2024-11-20T22:26:07,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141627804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:07,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141627808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:07,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141627810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:07,943 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/a7790fca00c441e995d66a279d454092 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/a7790fca00c441e995d66a279d454092 2024-11-20T22:26:07,966 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/C of bacf880704606b04149d74a04b9ceb0e into a7790fca00c441e995d66a279d454092(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:26:07,967 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:07,967 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/C, priority=13, startTime=1732141567296; duration=0sec 2024-11-20T22:26:07,967 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:07,967 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:C 2024-11-20T22:26:08,014 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/b2a05953115e4d91b528d8cf974fb108 2024-11-20T22:26:08,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/fccce670b0aa47cd89ecbeee3a964508 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/fccce670b0aa47cd89ecbeee3a964508 2024-11-20T22:26:08,053 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/fccce670b0aa47cd89ecbeee3a964508, entries=150, sequenceid=290, filesize=30.5 K 2024-11-20T22:26:08,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/2274906e86a147e391367d2a2f188dc0 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/2274906e86a147e391367d2a2f188dc0 2024-11-20T22:26:08,069 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/2274906e86a147e391367d2a2f188dc0, entries=150, sequenceid=290, filesize=12.0 K 2024-11-20T22:26:08,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/b2a05953115e4d91b528d8cf974fb108 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/b2a05953115e4d91b528d8cf974fb108 2024-11-20T22:26:08,094 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/b2a05953115e4d91b528d8cf974fb108, entries=150, sequenceid=290, filesize=12.0 K 2024-11-20T22:26:08,094 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for bacf880704606b04149d74a04b9ceb0e in 714ms, sequenceid=290, compaction requested=false 2024-11-20T22:26:08,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:08,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:08,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-11-20T22:26:08,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-11-20T22:26:08,110 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-20T22:26:08,111 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7980 sec 2024-11-20T22:26:08,116 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 1.8150 sec 2024-11-20T22:26:08,117 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bacf880704606b04149d74a04b9ceb0e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T22:26:08,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:08,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=A 2024-11-20T22:26:08,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:08,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=B 2024-11-20T22:26:08,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:08,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
bacf880704606b04149d74a04b9ceb0e, store=C 2024-11-20T22:26:08,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:08,136 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b45c751d13af4afc95bbd31f386098ef_bacf880704606b04149d74a04b9ceb0e is 50, key is test_row_0/A:col10/1732141568115/Put/seqid=0 2024-11-20T22:26:08,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742481_1657 (size=14994) 2024-11-20T22:26:08,171 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141628167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:08,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141628169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:08,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141628170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:08,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141628274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:08,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141628275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:08,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141628275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:08,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T22:26:08,407 INFO [Thread-2535 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-11-20T22:26:08,408 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:26:08,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-11-20T22:26:08,410 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:26:08,411 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:26:08,411 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:26:08,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T22:26:08,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141628479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:08,484 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141628480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:08,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141628487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:08,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T22:26:08,556 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,559 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b45c751d13af4afc95bbd31f386098ef_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b45c751d13af4afc95bbd31f386098ef_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:08,560 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/0ac01fe8e2f64f1fa0b7d8daa1e1cabc, store: [table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:08,560 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/0ac01fe8e2f64f1fa0b7d8daa1e1cabc is 175, key is test_row_0/A:col10/1732141568115/Put/seqid=0 2024-11-20T22:26:08,566 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:08,569 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-20T22:26:08,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:08,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
as already flushing 2024-11-20T22:26:08,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:08,569 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:08,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:08,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:08,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742482_1658 (size=39949) 2024-11-20T22:26:08,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T22:26:08,722 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:08,723 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-20T22:26:08,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:08,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:26:08,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
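Annotation: the block above shows the other half of the picture. The master's FlushTableProcedure (pid=171) dispatches a FlushRegionCallable (pid=172) to the region server, which answers "NOT flushing ... as already flushing", reports the IOException back, and the master keeps re-dispatching the sub-procedure until the in-flight flush completes. From a caller's point of view the whole exchange sits behind a single Admin call; a minimal sketch (table name assumed, error handling omitted) is:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

// Sketch of the client-side request behind the FLUSH operation logged above.
// Admin.flush(TableName) asks the master to flush every region of the table and
// returns once the resulting procedure finishes; retries of individual regions
// (like pid=172 here) happen inside the master, not in caller code.
final class FlushHelper {
  static void flushTable(Connection conn) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}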
2024-11-20T22:26:08,723 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:08,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:08,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:08,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141628787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:08,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141628791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:08,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141628795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:08,875 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:08,876 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-20T22:26:08,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:08,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:26:08,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:08,876 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:08,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:08,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:08,995 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=306, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/0ac01fe8e2f64f1fa0b7d8daa1e1cabc 2024-11-20T22:26:09,012 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/21cc62b924294969b7a8cf58f89d7cb1 is 50, key is test_row_0/B:col10/1732141568115/Put/seqid=0 2024-11-20T22:26:09,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T22:26:09,031 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:09,031 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-20T22:26:09,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:09,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:26:09,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:09,031 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:09,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:09,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:09,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742483_1659 (size=12301) 2024-11-20T22:26:09,055 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=306 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/21cc62b924294969b7a8cf58f89d7cb1 2024-11-20T22:26:09,071 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/c48922ff989b49d984dce2e5905562a3 is 50, key is test_row_0/C:col10/1732141568115/Put/seqid=0 2024-11-20T22:26:09,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742484_1660 (size=12301) 2024-11-20T22:26:09,116 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=306 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/c48922ff989b49d984dce2e5905562a3 2024-11-20T22:26:09,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/0ac01fe8e2f64f1fa0b7d8daa1e1cabc as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/0ac01fe8e2f64f1fa0b7d8daa1e1cabc 2024-11-20T22:26:09,125 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/0ac01fe8e2f64f1fa0b7d8daa1e1cabc, entries=200, sequenceid=306, filesize=39.0 K 2024-11-20T22:26:09,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/21cc62b924294969b7a8cf58f89d7cb1 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/21cc62b924294969b7a8cf58f89d7cb1 2024-11-20T22:26:09,130 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/21cc62b924294969b7a8cf58f89d7cb1, entries=150, sequenceid=306, filesize=12.0 K 2024-11-20T22:26:09,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/c48922ff989b49d984dce2e5905562a3 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/c48922ff989b49d984dce2e5905562a3 2024-11-20T22:26:09,137 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/c48922ff989b49d984dce2e5905562a3, entries=150, sequenceid=306, filesize=12.0 K 2024-11-20T22:26:09,138 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for bacf880704606b04149d74a04b9ceb0e in 1021ms, sequenceid=306, compaction requested=true 2024-11-20T22:26:09,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:09,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:26:09,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:09,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:26:09,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:26:09,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:26:09,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-20T22:26:09,139 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:09,139 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:09,141 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103164 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:09,141 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/A is initiating minor compaction (all files) 2024-11-20T22:26:09,142 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/A in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:09,142 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/822a5d8a50394c92b94b33672b4163ba, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/fccce670b0aa47cd89ecbeee3a964508, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/0ac01fe8e2f64f1fa0b7d8daa1e1cabc] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=100.7 K 2024-11-20T22:26:09,142 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:09,142 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/822a5d8a50394c92b94b33672b4163ba, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/fccce670b0aa47cd89ecbeee3a964508, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/0ac01fe8e2f64f1fa0b7d8daa1e1cabc] 2024-11-20T22:26:09,143 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37501 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:09,143 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/C is initiating minor compaction (all files) 2024-11-20T22:26:09,143 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/C in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:09,144 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/a7790fca00c441e995d66a279d454092, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/b2a05953115e4d91b528d8cf974fb108, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/c48922ff989b49d984dce2e5905562a3] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=36.6 K 2024-11-20T22:26:09,145 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting a7790fca00c441e995d66a279d454092, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1732141564086 2024-11-20T22:26:09,145 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 822a5d8a50394c92b94b33672b4163ba, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1732141564086 2024-11-20T22:26:09,146 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting b2a05953115e4d91b528d8cf974fb108, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732141566345 2024-11-20T22:26:09,146 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting fccce670b0aa47cd89ecbeee3a964508, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732141566345 2024-11-20T22:26:09,146 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting c48922ff989b49d984dce2e5905562a3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1732141567489 2024-11-20T22:26:09,146 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
0ac01fe8e2f64f1fa0b7d8daa1e1cabc, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1732141567478 2024-11-20T22:26:09,171 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:09,179 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#C#compaction#552 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:09,179 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/29d7b219cbff4d08aed5b9e6aaf7c30b is 50, key is test_row_0/C:col10/1732141568115/Put/seqid=0 2024-11-20T22:26:09,186 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:09,187 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44631 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-20T22:26:09,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
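Annotation: once the memstore flush commits its three new store files, the flusher marks stores A, B and C for compaction; ExploringCompactionPolicy then selects all three eligible files per store (with 16 files as the blocking threshold), and the throughput controller reports a 50.00 MB/second limit. The sketch below lists the standard configuration keys behind those numbers; the values are examples that happen to match common defaults, not settings read from this test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch of the knobs governing the compaction behaviour logged above.
// Key names are standard HBase configuration properties; the values are examples,
// not values taken from this test run.
final class CompactionTuning {
  static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);         // at least 3 files before a minor compaction is considered
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);    // the "16 blocking" figure in the selection log
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    return conf;
  }
}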
2024-11-20T22:26:09,188 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing bacf880704606b04149d74a04b9ceb0e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T22:26:09,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=A 2024-11-20T22:26:09,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:09,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=B 2024-11-20T22:26:09,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:09,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=C 2024-11-20T22:26:09,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:09,190 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411200e25001833344adba2cd715d0df1ca75_bacf880704606b04149d74a04b9ceb0e store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:09,193 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411200e25001833344adba2cd715d0df1ca75_bacf880704606b04149d74a04b9ceb0e, store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:09,193 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200e25001833344adba2cd715d0df1ca75_bacf880704606b04149d74a04b9ceb0e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:09,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742485_1661 (size=13051) 2024-11-20T22:26:09,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201e5b08d783904ce9849042672e273f46_bacf880704606b04149d74a04b9ceb0e is 50, key is test_row_0/A:col10/1732141568168/Put/seqid=0 2024-11-20T22:26:09,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742486_1662 (size=4469) 2024-11-20T22:26:09,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] 
regionserver.HRegion(8581): Flush requested on bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:09,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. as already flushing 2024-11-20T22:26:09,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742487_1663 (size=12454) 2024-11-20T22:26:09,346 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141629335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:09,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141629347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:09,357 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141629351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:09,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141629447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:09,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141629454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:09,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141629460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:09,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T22:26:09,645 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/29d7b219cbff4d08aed5b9e6aaf7c30b as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/29d7b219cbff4d08aed5b9e6aaf7c30b 2024-11-20T22:26:09,651 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/C of bacf880704606b04149d74a04b9ceb0e into 29d7b219cbff4d08aed5b9e6aaf7c30b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
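Editor's note: the repeated RegionTooBusyException entries above are the region server applying write back-pressure. The region's memstore has crossed its blocking threshold (reported here as 512.0 K, presumably the test's small hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier) while the pid=172 flush is still draining it, so incoming Mutate calls are rejected in HRegion.checkResources until the flush finishes. The following is a minimal client-side sketch, under those assumptions, of how a writer could back off when this surfaces; in practice the HBase client already retries internally (governed by hbase.client.retries.number and hbase.client.pause), so the explicit loop is purely illustrative and the table/row/value names are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long pauseMs = 100;                 // hypothetical starting pause
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break;                          // write accepted by the region
        } catch (RegionTooBusyException e) {
          // The region is over its blocking memstore size; wait for the
          // in-flight flush to drain and try again with a longer pause.
          Thread.sleep(pauseMs);
          pauseMs *= 2;
        }
      }
    }
  }
}

The same pressure explains the earlier "NOT flushing ... as already flushing" entry: a second flush request arrives while the first is still running, so the only remedy on the server side is to let the current flush complete.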
2024-11-20T22:26:09,651 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:09,651 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/C, priority=13, startTime=1732141569138; duration=0sec 2024-11-20T22:26:09,651 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:09,651 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:C 2024-11-20T22:26:09,652 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:09,653 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37501 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:09,653 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/B is initiating minor compaction (all files) 2024-11-20T22:26:09,653 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/B in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:09,653 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/533c1432a03f44dc866a76b869999c6d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/2274906e86a147e391367d2a2f188dc0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/21cc62b924294969b7a8cf58f89d7cb1] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=36.6 K 2024-11-20T22:26:09,654 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 533c1432a03f44dc866a76b869999c6d, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1732141564086 2024-11-20T22:26:09,654 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 2274906e86a147e391367d2a2f188dc0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732141566345 2024-11-20T22:26:09,654 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 21cc62b924294969b7a8cf58f89d7cb1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1732141567489 2024-11-20T22:26:09,669 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
bacf880704606b04149d74a04b9ceb0e#B#compaction#554 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:09,669 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/bece1de248a94f7b90e30e270895f479 is 50, key is test_row_0/B:col10/1732141568115/Put/seqid=0 2024-11-20T22:26:09,670 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141629662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:09,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141629666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:09,676 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141629666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:09,678 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#A#compaction#551 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:09,678 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/73d62a28f536412f92594f457bacffc4 is 175, key is test_row_0/A:col10/1732141568115/Put/seqid=0 2024-11-20T22:26:09,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,716 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201e5b08d783904ce9849042672e273f46_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201e5b08d783904ce9849042672e273f46_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:09,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/d220bc732a644aa38f428c6b88a24a27, store: [table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:09,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/d220bc732a644aa38f428c6b88a24a27 is 175, key is test_row_0/A:col10/1732141568168/Put/seqid=0 2024-11-20T22:26:09,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742488_1664 (size=13051) 2024-11-20T22:26:09,736 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/bece1de248a94f7b90e30e270895f479 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/bece1de248a94f7b90e30e270895f479 2024-11-20T22:26:09,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742489_1665 (size=32005) 2024-11-20T22:26:09,744 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/B of bacf880704606b04149d74a04b9ceb0e into bece1de248a94f7b90e30e270895f479(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
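Editor's note: the long-compaction thread above selects all three B-family files via ExploringCompactionPolicy and then rewrites them under PressureAwareThroughputController, which caps aggregate compaction I/O (the "total limit is 50.00 MB/second" figure in the log). A rough sketch of the knobs that usually drive both sides of this behaviour follows; the property names are the ones I believe 2.x releases use, so treat them as assumptions to verify against the running version, and note that in a real deployment they would be set in hbase-site.xml on the region servers rather than programmatically as shown here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // File-selection side (ExploringCompactionPolicy): how many store files
    // must accumulate before a minor compaction is considered, and the size
    // ratio used when exploring candidate permutations.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);

    // Throughput side (PressureAwareCompactionThroughputController): as I
    // understand it, the controller scales the allowed write rate between
    // these bounds according to flush pressure; the log's 50.00 MB/second is
    // the value it settled on for this run.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
  }
}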
2024-11-20T22:26:09,744 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:09,744 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/B, priority=13, startTime=1732141569138; duration=0sec 2024-11-20T22:26:09,745 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:09,745 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:B 2024-11-20T22:26:09,749 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/73d62a28f536412f92594f457bacffc4 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/73d62a28f536412f92594f457bacffc4 2024-11-20T22:26:09,754 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/A of bacf880704606b04149d74a04b9ceb0e into 73d62a28f536412f92594f457bacffc4(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:09,754 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:09,754 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/A, priority=13, startTime=1732141569138; duration=0sec 2024-11-20T22:26:09,754 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:09,755 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:A 2024-11-20T22:26:09,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742490_1666 (size=31255) 2024-11-20T22:26:09,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141629972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:09,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141629973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:09,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141629977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:10,179 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=330, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/d220bc732a644aa38f428c6b88a24a27 2024-11-20T22:26:10,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/f9b07389372548bebe77fcbde9e16fa3 is 50, key is test_row_0/B:col10/1732141568168/Put/seqid=0 2024-11-20T22:26:10,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742491_1667 (size=12301) 2024-11-20T22:26:10,480 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141630478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:10,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141630481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:10,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141630485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:10,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T22:26:10,688 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/f9b07389372548bebe77fcbde9e16fa3 2024-11-20T22:26:10,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/d8dacfce40aa4547a5c092002f1600a5 is 50, key is test_row_0/C:col10/1732141568168/Put/seqid=0 2024-11-20T22:26:10,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742492_1668 (size=12301) 2024-11-20T22:26:10,743 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/d8dacfce40aa4547a5c092002f1600a5 2024-11-20T22:26:10,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/d220bc732a644aa38f428c6b88a24a27 as 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/d220bc732a644aa38f428c6b88a24a27 2024-11-20T22:26:10,753 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/d220bc732a644aa38f428c6b88a24a27, entries=150, sequenceid=330, filesize=30.5 K 2024-11-20T22:26:10,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/f9b07389372548bebe77fcbde9e16fa3 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/f9b07389372548bebe77fcbde9e16fa3 2024-11-20T22:26:10,758 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/f9b07389372548bebe77fcbde9e16fa3, entries=150, sequenceid=330, filesize=12.0 K 2024-11-20T22:26:10,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/d8dacfce40aa4547a5c092002f1600a5 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/d8dacfce40aa4547a5c092002f1600a5 2024-11-20T22:26:10,765 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/d8dacfce40aa4547a5c092002f1600a5, entries=150, sequenceid=330, filesize=12.0 K 2024-11-20T22:26:10,766 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for bacf880704606b04149d74a04b9ceb0e in 1577ms, sequenceid=330, compaction requested=false 2024-11-20T22:26:10,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:10,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
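Editor's note: each flush in this trace commits three family files (A, B, C) and, for family A, a MOB file that is first renamed out of mobdir/.tmp into the MOB region directory; the whole sequence runs under FlushRegionProcedure pid=172, the child of the FlushTableProcedure pid=171 whose completion is logged just below. For reference, here is a hedged sketch of how a table with a MOB-enabled family might be declared and then flushed on demand; the families match the log, but the MOB threshold and table creation details are illustrative rather than taken from the test's actual descriptor.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("TestAcidGuarantees");

      // Family A keeps values above the threshold as MOB cells, which is why
      // the flush above goes through DefaultMobStoreFlusher and renames a file
      // out of mobdir/.tmp; the 100-byte threshold is only illustrative.
      TableDescriptorBuilder desc = TableDescriptorBuilder.newBuilder(name)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)
              .setMobThreshold(100L)
              .build())
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
      admin.createTable(desc.build());

      // Requests the table flush that appears in the log as FlushTableProcedure
      // (pid=171) with a per-region FlushRegionProcedure child (pid=172).
      admin.flush(name);
    }
  }
}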
2024-11-20T22:26:10,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-11-20T22:26:10,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-11-20T22:26:10,768 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-11-20T22:26:10,769 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3560 sec 2024-11-20T22:26:10,772 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 2.3610 sec 2024-11-20T22:26:11,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:11,490 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bacf880704606b04149d74a04b9ceb0e 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T22:26:11,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=A 2024-11-20T22:26:11,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:11,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=B 2024-11-20T22:26:11,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:11,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=C 2024-11-20T22:26:11,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:11,516 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202cdf2add2fd14dd7a9d02e3796a24812_bacf880704606b04149d74a04b9ceb0e is 50, key is test_row_0/A:col10/1732141569332/Put/seqid=0 2024-11-20T22:26:11,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742493_1669 (size=12454) 2024-11-20T22:26:11,522 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:11,526 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202cdf2add2fd14dd7a9d02e3796a24812_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202cdf2add2fd14dd7a9d02e3796a24812_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:11,526 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141631521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:11,529 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/ed7239c78cac41efb6e42878c04cda88, store: [table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:11,529 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141631526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:11,530 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/ed7239c78cac41efb6e42878c04cda88 is 175, key is test_row_0/A:col10/1732141569332/Put/seqid=0 2024-11-20T22:26:11,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141631528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:11,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742494_1670 (size=31255) 2024-11-20T22:26:11,560 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=347, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/ed7239c78cac41efb6e42878c04cda88 2024-11-20T22:26:11,568 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/79adc2759d924d82b371fdf86efe10ac is 50, key is test_row_0/B:col10/1732141569332/Put/seqid=0 2024-11-20T22:26:11,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742495_1671 (size=12301) 2024-11-20T22:26:11,615 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/79adc2759d924d82b371fdf86efe10ac 2024-11-20T22:26:11,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52082 deadline: 1732141631629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:11,633 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/ec49a2dc5e6147a386d635225335cf85 is 50, key is test_row_0/C:col10/1732141569332/Put/seqid=0 2024-11-20T22:26:11,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52096 deadline: 1732141631631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:11,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44631 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52040 deadline: 1732141631635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:11,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742496_1672 (size=12301) 2024-11-20T22:26:11,683 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/ec49a2dc5e6147a386d635225335cf85 2024-11-20T22:26:11,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/ed7239c78cac41efb6e42878c04cda88 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/ed7239c78cac41efb6e42878c04cda88 2024-11-20T22:26:11,691 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/ed7239c78cac41efb6e42878c04cda88, entries=150, sequenceid=347, filesize=30.5 K 2024-11-20T22:26:11,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/79adc2759d924d82b371fdf86efe10ac as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/79adc2759d924d82b371fdf86efe10ac 2024-11-20T22:26:11,704 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/79adc2759d924d82b371fdf86efe10ac, entries=150, sequenceid=347, filesize=12.0 K 2024-11-20T22:26:11,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/ec49a2dc5e6147a386d635225335cf85 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/ec49a2dc5e6147a386d635225335cf85 2024-11-20T22:26:11,708 DEBUG [Thread-2544 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51fccca6 to 127.0.0.1:51822 2024-11-20T22:26:11,708 DEBUG [Thread-2544 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:11,710 DEBUG [Thread-2542 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3fbb1399 to 127.0.0.1:51822 2024-11-20T22:26:11,710 DEBUG [Thread-2542 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:11,711 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/ec49a2dc5e6147a386d635225335cf85, entries=150, sequenceid=347, filesize=12.0 K 2024-11-20T22:26:11,712 DEBUG [Thread-2540 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x63054209 to 127.0.0.1:51822 2024-11-20T22:26:11,712 DEBUG [Thread-2540 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:11,712 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for bacf880704606b04149d74a04b9ceb0e in 223ms, sequenceid=347, compaction requested=true 2024-11-20T22:26:11,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:11,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:26:11,713 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:11,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:11,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:26:11,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:11,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bacf880704606b04149d74a04b9ceb0e:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:26:11,713 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:26:11,713 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:11,714 DEBUG [Thread-2538 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d02ace0 to 127.0.0.1:51822 2024-11-20T22:26:11,714 DEBUG [Thread-2538 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:11,714 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:11,714 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94515 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:11,714 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/B is initiating minor compaction (all files) 2024-11-20T22:26:11,714 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/A is initiating minor compaction (all files) 2024-11-20T22:26:11,714 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/B in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:11,714 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/bece1de248a94f7b90e30e270895f479, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/f9b07389372548bebe77fcbde9e16fa3, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/79adc2759d924d82b371fdf86efe10ac] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=36.8 K 2024-11-20T22:26:11,715 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/A in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
2024-11-20T22:26:11,715 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/73d62a28f536412f92594f457bacffc4, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/d220bc732a644aa38f428c6b88a24a27, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/ed7239c78cac41efb6e42878c04cda88] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=92.3 K 2024-11-20T22:26:11,715 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:11,715 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. files: [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/73d62a28f536412f92594f457bacffc4, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/d220bc732a644aa38f428c6b88a24a27, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/ed7239c78cac41efb6e42878c04cda88] 2024-11-20T22:26:11,715 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 73d62a28f536412f92594f457bacffc4, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1732141567489 2024-11-20T22:26:11,715 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting bece1de248a94f7b90e30e270895f479, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1732141567489 2024-11-20T22:26:11,715 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting d220bc732a644aa38f428c6b88a24a27, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732141568159 2024-11-20T22:26:11,716 DEBUG [Thread-2536 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62b06a95 to 127.0.0.1:51822 2024-11-20T22:26:11,716 DEBUG [Thread-2536 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:11,716 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting ed7239c78cac41efb6e42878c04cda88, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1732141569332 2024-11-20T22:26:11,716 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting f9b07389372548bebe77fcbde9e16fa3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, 
seqNum=330, earliestPutTs=1732141568159 2024-11-20T22:26:11,717 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79adc2759d924d82b371fdf86efe10ac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1732141569332 2024-11-20T22:26:11,725 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:11,727 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112017b66281e9f1408398e18e278c2c6565_bacf880704606b04149d74a04b9ceb0e store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:11,729 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112017b66281e9f1408398e18e278c2c6565_bacf880704606b04149d74a04b9ceb0e, store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:11,729 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112017b66281e9f1408398e18e278c2c6565_bacf880704606b04149d74a04b9ceb0e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:11,732 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#B#compaction#561 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:11,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742497_1673 (size=4469) 2024-11-20T22:26:11,733 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/37b9cfe5654048febef4ae93f496693d is 50, key is test_row_0/B:col10/1732141569332/Put/seqid=0 2024-11-20T22:26:11,733 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#A#compaction#560 average throughput is 3.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:11,734 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/2a609136f62b4c4c99381323e5e5b827 is 175, key is test_row_0/A:col10/1732141569332/Put/seqid=0 2024-11-20T22:26:11,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742498_1674 (size=32107) 2024-11-20T22:26:11,764 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/2a609136f62b4c4c99381323e5e5b827 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/2a609136f62b4c4c99381323e5e5b827 2024-11-20T22:26:11,769 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/A of bacf880704606b04149d74a04b9ceb0e into 2a609136f62b4c4c99381323e5e5b827(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:11,769 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:11,769 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/A, priority=13, startTime=1732141571713; duration=0sec 2024-11-20T22:26:11,769 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:11,769 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:A 2024-11-20T22:26:11,769 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:11,770 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:11,770 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1540): bacf880704606b04149d74a04b9ceb0e/C is initiating minor compaction (all files) 2024-11-20T22:26:11,770 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bacf880704606b04149d74a04b9ceb0e/C in TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
2024-11-20T22:26:11,770 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/29d7b219cbff4d08aed5b9e6aaf7c30b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/d8dacfce40aa4547a5c092002f1600a5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/ec49a2dc5e6147a386d635225335cf85] into tmpdir=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp, totalSize=36.8 K 2024-11-20T22:26:11,771 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting 29d7b219cbff4d08aed5b9e6aaf7c30b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1732141567489 2024-11-20T22:26:11,771 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting d8dacfce40aa4547a5c092002f1600a5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732141568159 2024-11-20T22:26:11,771 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] compactions.Compactor(224): Compacting ec49a2dc5e6147a386d635225335cf85, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1732141569332 2024-11-20T22:26:11,800 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bacf880704606b04149d74a04b9ceb0e#C#compaction#562 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:11,801 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/317e27ddfaa74e2680954edcfd0200a4 is 50, key is test_row_0/C:col10/1732141569332/Put/seqid=0 2024-11-20T22:26:11,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742499_1675 (size=13153) 2024-11-20T22:26:11,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742500_1676 (size=13153) 2024-11-20T22:26:11,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44631 {}] regionserver.HRegion(8581): Flush requested on bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:11,835 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bacf880704606b04149d74a04b9ceb0e 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T22:26:11,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=A 2024-11-20T22:26:11,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:11,835 DEBUG [Thread-2525 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2089b1f4 to 127.0.0.1:51822 2024-11-20T22:26:11,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=B 2024-11-20T22:26:11,835 DEBUG [Thread-2525 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:11,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:11,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=C 2024-11-20T22:26:11,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:11,839 DEBUG [Thread-2527 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x65f51785 to 127.0.0.1:51822 2024-11-20T22:26:11,839 DEBUG [Thread-2529 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3cc71f2e to 127.0.0.1:51822 2024-11-20T22:26:11,839 DEBUG [Thread-2527 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:11,839 DEBUG [Thread-2529 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:11,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201ce227a7661c4921b62cb3a72cffb989_bacf880704606b04149d74a04b9ceb0e is 50, key is test_row_0/A:col10/1732141571524/Put/seqid=0 2024-11-20T22:26:11,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742501_1677 (size=12454) 2024-11-20T22:26:11,859 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:11,862 INFO [MemStoreFlusher.0 {}] 
regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201ce227a7661c4921b62cb3a72cffb989_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201ce227a7661c4921b62cb3a72cffb989_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:11,862 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/838a3add16b140f998b181113b374c7f, store: [table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:11,863 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/838a3add16b140f998b181113b374c7f is 175, key is test_row_0/A:col10/1732141571524/Put/seqid=0 2024-11-20T22:26:11,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742502_1678 (size=31255) 2024-11-20T22:26:11,869 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=371, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/838a3add16b140f998b181113b374c7f 2024-11-20T22:26:11,879 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/98b73669317a4c0d9d5f19cbd079df06 is 50, key is test_row_0/B:col10/1732141571524/Put/seqid=0 2024-11-20T22:26:11,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742503_1679 (size=12301) 2024-11-20T22:26:11,883 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/98b73669317a4c0d9d5f19cbd079df06 2024-11-20T22:26:11,889 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/958a5003515a426d89af45e8142b240f is 50, key is test_row_0/C:col10/1732141571524/Put/seqid=0 2024-11-20T22:26:11,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742504_1680 (size=12301) 2024-11-20T22:26:11,901 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=371 (bloomFilter=true), 
to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/958a5003515a426d89af45e8142b240f 2024-11-20T22:26:11,905 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/838a3add16b140f998b181113b374c7f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/838a3add16b140f998b181113b374c7f 2024-11-20T22:26:11,907 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/838a3add16b140f998b181113b374c7f, entries=150, sequenceid=371, filesize=30.5 K 2024-11-20T22:26:11,908 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/98b73669317a4c0d9d5f19cbd079df06 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/98b73669317a4c0d9d5f19cbd079df06 2024-11-20T22:26:11,912 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/98b73669317a4c0d9d5f19cbd079df06, entries=150, sequenceid=371, filesize=12.0 K 2024-11-20T22:26:11,913 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/958a5003515a426d89af45e8142b240f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/958a5003515a426d89af45e8142b240f 2024-11-20T22:26:11,916 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/958a5003515a426d89af45e8142b240f, entries=150, sequenceid=371, filesize=12.0 K 2024-11-20T22:26:11,917 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=13.42 KB/13740 for bacf880704606b04149d74a04b9ceb0e in 82ms, sequenceid=371, compaction requested=false 2024-11-20T22:26:11,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:12,206 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/37b9cfe5654048febef4ae93f496693d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/37b9cfe5654048febef4ae93f496693d 2024-11-20T22:26:12,210 INFO 
[RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/B of bacf880704606b04149d74a04b9ceb0e into 37b9cfe5654048febef4ae93f496693d(size=12.8 K), total size for store is 24.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:12,210 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:12,210 INFO [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/B, priority=13, startTime=1732141571713; duration=0sec 2024-11-20T22:26:12,210 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:12,210 DEBUG [RS:0;6365a1e51efd:44631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:B 2024-11-20T22:26:12,231 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/317e27ddfaa74e2680954edcfd0200a4 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/317e27ddfaa74e2680954edcfd0200a4 2024-11-20T22:26:12,235 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bacf880704606b04149d74a04b9ceb0e/C of bacf880704606b04149d74a04b9ceb0e into 317e27ddfaa74e2680954edcfd0200a4(size=12.8 K), total size for store is 24.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:26:12,235 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:12,235 INFO [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e., storeName=bacf880704606b04149d74a04b9ceb0e/C, priority=13, startTime=1732141571713; duration=0sec 2024-11-20T22:26:12,235 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:12,235 DEBUG [RS:0;6365a1e51efd:44631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bacf880704606b04149d74a04b9ceb0e:C 2024-11-20T22:26:12,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T22:26:12,517 INFO [Thread-2535 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-11-20T22:26:13,271 DEBUG [Thread-2531 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79a7bd2b to 127.0.0.1:51822 2024-11-20T22:26:13,271 DEBUG [Thread-2531 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:13,321 DEBUG [Thread-2533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4d688bcb to 127.0.0.1:51822 2024-11-20T22:26:13,321 DEBUG [Thread-2533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:13,322 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T22:26:13,322 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 85 2024-11-20T22:26:13,322 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 77 2024-11-20T22:26:13,322 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 99 2024-11-20T22:26:13,322 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 17 2024-11-20T22:26:13,322 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 14 2024-11-20T22:26:13,322 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T22:26:13,322 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3845 2024-11-20T22:26:13,322 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3685 2024-11-20T22:26:13,322 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3673 2024-11-20T22:26:13,322 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3823 2024-11-20T22:26:13,322 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3782 2024-11-20T22:26:13,322 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T22:26:13,322 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T22:26:13,322 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x151bac0d to 127.0.0.1:51822 2024-11-20T22:26:13,322 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:13,327 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T22:26:13,327 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T22:26:13,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T22:26:13,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T22:26:13,330 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141573330"}]},"ts":"1732141573330"} 2024-11-20T22:26:13,331 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T22:26:13,358 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T22:26:13,359 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T22:26:13,361 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bacf880704606b04149d74a04b9ceb0e, UNASSIGN}] 2024-11-20T22:26:13,362 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=175, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bacf880704606b04149d74a04b9ceb0e, UNASSIGN 2024-11-20T22:26:13,363 INFO [PEWorker-5 
{}] assignment.RegionStateStore(202): pid=175 updating hbase:meta row=bacf880704606b04149d74a04b9ceb0e, regionState=CLOSING, regionLocation=6365a1e51efd,44631,1732141399950 2024-11-20T22:26:13,369 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T22:26:13,369 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; CloseRegionProcedure bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950}] 2024-11-20T22:26:13,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T22:26:13,521 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:13,521 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] handler.UnassignRegionHandler(124): Close bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:13,522 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T22:26:13,522 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.HRegion(1681): Closing bacf880704606b04149d74a04b9ceb0e, disabling compactions & flushes 2024-11-20T22:26:13,522 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:13,522 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:13,522 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. after waiting 0 ms 2024-11-20T22:26:13,522 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 
2024-11-20T22:26:13,522 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.HRegion(2837): Flushing bacf880704606b04149d74a04b9ceb0e 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T22:26:13,522 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=A 2024-11-20T22:26:13,522 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:13,522 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=B 2024-11-20T22:26:13,522 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:13,522 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bacf880704606b04149d74a04b9ceb0e, store=C 2024-11-20T22:26:13,522 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:13,535 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202fb9e3c6d4c740b2b88102da21f79373_bacf880704606b04149d74a04b9ceb0e is 50, key is test_row_0/A:col10/1732141573320/Put/seqid=0 2024-11-20T22:26:13,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742505_1681 (size=12454) 2024-11-20T22:26:13,565 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,568 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202fb9e3c6d4c740b2b88102da21f79373_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202fb9e3c6d4c740b2b88102da21f79373_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:13,568 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/2773da85e75c49f89b9e059d9dedb997, store: [table=TestAcidGuarantees family=A region=bacf880704606b04149d74a04b9ceb0e] 2024-11-20T22:26:13,569 DEBUG 
[RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/2773da85e75c49f89b9e059d9dedb997 is 175, key is test_row_0/A:col10/1732141573320/Put/seqid=0 2024-11-20T22:26:13,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742506_1682 (size=31255) 2024-11-20T22:26:13,585 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=380, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/2773da85e75c49f89b9e059d9dedb997 2024-11-20T22:26:13,593 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/39577715e5164c09bcee382a1dda0fb7 is 50, key is test_row_0/B:col10/1732141573320/Put/seqid=0 2024-11-20T22:26:13,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742507_1683 (size=12301) 2024-11-20T22:26:13,628 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/39577715e5164c09bcee382a1dda0fb7 2024-11-20T22:26:13,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T22:26:13,635 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/84718d960c3144bb88da006701a271b4 is 50, key is test_row_0/C:col10/1732141573320/Put/seqid=0 2024-11-20T22:26:13,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742508_1684 (size=12301) 2024-11-20T22:26:13,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T22:26:14,050 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/84718d960c3144bb88da006701a271b4 2024-11-20T22:26:14,056 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/A/2773da85e75c49f89b9e059d9dedb997 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/2773da85e75c49f89b9e059d9dedb997 2024-11-20T22:26:14,060 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/2773da85e75c49f89b9e059d9dedb997, entries=150, sequenceid=380, filesize=30.5 K 2024-11-20T22:26:14,060 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/B/39577715e5164c09bcee382a1dda0fb7 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/39577715e5164c09bcee382a1dda0fb7 2024-11-20T22:26:14,065 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/39577715e5164c09bcee382a1dda0fb7, entries=150, sequenceid=380, filesize=12.0 K 2024-11-20T22:26:14,066 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/.tmp/C/84718d960c3144bb88da006701a271b4 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/84718d960c3144bb88da006701a271b4 2024-11-20T22:26:14,069 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/84718d960c3144bb88da006701a271b4, entries=150, sequenceid=380, filesize=12.0 K 2024-11-20T22:26:14,070 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for bacf880704606b04149d74a04b9ceb0e in 548ms, sequenceid=380, compaction requested=true 2024-11-20T22:26:14,075 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/ad81fb331931472b85218348bc432264, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/b61e7aa507e143e29e2781524b86c106, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/8705f7cbf1f84b4dae5dd4e6b9671e48, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/2ed5f848b91e48b1a13f4ba7594f7b4d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/87c476c7f3e14b3f907958a8d4eb3fd3, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/ae48ef50907540999add71f47a936db5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/5e2059836b8349a9a56a7230dd3782e5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/a38d15dc680141c3bff9bfcb22f83184, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/50a69b4bad894a4a9fcdda2cada9bad5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/4dfd3b756e9e47e8a64decfea5dc9577, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/3a3899d4a7504384a1542e183c812019, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/2b009c721f7245b7b74c86cfbc12cecf, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/f56e7fbdeb9345e1a8482fef0015284d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/57f014a1c94e4ecea4105004254eb7a0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/b608ec31b63549b3acc1f74dcefdc954, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/0c6fd2b8acbe40b6b8d163171b8582af, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/202ff77599274b879eba04ef3786becf, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/81d607a652e84581b6b765264153a0d5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/822a5d8a50394c92b94b33672b4163ba, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/1b46b3e4cae44aba879cd10c7ffffeaf, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/fccce670b0aa47cd89ecbeee3a964508, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/0ac01fe8e2f64f1fa0b7d8daa1e1cabc, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/73d62a28f536412f92594f457bacffc4, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/d220bc732a644aa38f428c6b88a24a27, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/ed7239c78cac41efb6e42878c04cda88] to archive 2024-11-20T22:26:14,079 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T22:26:14,081 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/ad81fb331931472b85218348bc432264 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/ad81fb331931472b85218348bc432264 2024-11-20T22:26:14,083 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/b61e7aa507e143e29e2781524b86c106 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/b61e7aa507e143e29e2781524b86c106 2024-11-20T22:26:14,093 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/8705f7cbf1f84b4dae5dd4e6b9671e48 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/8705f7cbf1f84b4dae5dd4e6b9671e48 2024-11-20T22:26:14,094 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/2ed5f848b91e48b1a13f4ba7594f7b4d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/2ed5f848b91e48b1a13f4ba7594f7b4d 2024-11-20T22:26:14,097 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/87c476c7f3e14b3f907958a8d4eb3fd3 to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/87c476c7f3e14b3f907958a8d4eb3fd3 2024-11-20T22:26:14,098 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/ae48ef50907540999add71f47a936db5 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/ae48ef50907540999add71f47a936db5 2024-11-20T22:26:14,101 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/5e2059836b8349a9a56a7230dd3782e5 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/5e2059836b8349a9a56a7230dd3782e5 2024-11-20T22:26:14,103 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/a38d15dc680141c3bff9bfcb22f83184 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/a38d15dc680141c3bff9bfcb22f83184 2024-11-20T22:26:14,113 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/50a69b4bad894a4a9fcdda2cada9bad5 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/50a69b4bad894a4a9fcdda2cada9bad5 2024-11-20T22:26:14,115 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/4dfd3b756e9e47e8a64decfea5dc9577 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/4dfd3b756e9e47e8a64decfea5dc9577 2024-11-20T22:26:14,116 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/3a3899d4a7504384a1542e183c812019 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/3a3899d4a7504384a1542e183c812019 2024-11-20T22:26:14,117 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/2b009c721f7245b7b74c86cfbc12cecf to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/2b009c721f7245b7b74c86cfbc12cecf 2024-11-20T22:26:14,119 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/f56e7fbdeb9345e1a8482fef0015284d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/f56e7fbdeb9345e1a8482fef0015284d 2024-11-20T22:26:14,120 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/57f014a1c94e4ecea4105004254eb7a0 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/57f014a1c94e4ecea4105004254eb7a0 2024-11-20T22:26:14,121 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/b608ec31b63549b3acc1f74dcefdc954 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/b608ec31b63549b3acc1f74dcefdc954 2024-11-20T22:26:14,122 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/0c6fd2b8acbe40b6b8d163171b8582af to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/0c6fd2b8acbe40b6b8d163171b8582af 2024-11-20T22:26:14,123 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/202ff77599274b879eba04ef3786becf to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/202ff77599274b879eba04ef3786becf 2024-11-20T22:26:14,126 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/81d607a652e84581b6b765264153a0d5 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/81d607a652e84581b6b765264153a0d5 2024-11-20T22:26:14,127 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/822a5d8a50394c92b94b33672b4163ba to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/822a5d8a50394c92b94b33672b4163ba 2024-11-20T22:26:14,128 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/1b46b3e4cae44aba879cd10c7ffffeaf to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/1b46b3e4cae44aba879cd10c7ffffeaf 2024-11-20T22:26:14,129 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/fccce670b0aa47cd89ecbeee3a964508 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/fccce670b0aa47cd89ecbeee3a964508 2024-11-20T22:26:14,130 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/0ac01fe8e2f64f1fa0b7d8daa1e1cabc to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/0ac01fe8e2f64f1fa0b7d8daa1e1cabc 2024-11-20T22:26:14,131 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/73d62a28f536412f92594f457bacffc4 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/73d62a28f536412f92594f457bacffc4 2024-11-20T22:26:14,132 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/d220bc732a644aa38f428c6b88a24a27 to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/d220bc732a644aa38f428c6b88a24a27 2024-11-20T22:26:14,141 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/ed7239c78cac41efb6e42878c04cda88 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/ed7239c78cac41efb6e42878c04cda88 2024-11-20T22:26:14,147 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/e415b976194f44d4a4530135aad71ea5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/b69be3a705a6484282e8246ea4a2a2db, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/073c661fff114780abbe32bcac8ba704, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/66a38c00149a4403b1c60f424b85d4ce, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/61ffae69605040f5af0b3c671efc0a01, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/2efedca84ead4590b196b328e48a9514, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/ab09beb5378149ad85498c5f3e1bb601, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/4b51c9ad66974c858cf016bc9ba05725, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/dc7c621b0fe34484b61405bf5fe60dc1, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/cec73974f8894716817cdf1b76ce9f92, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/38a4955662134fdeb6dead271c5e6718, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/e8cd84c4defc4116bbc1382445c3e44a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/5e608102d6e84713809ac219f1a94123, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/6f1d232380284b0390943aba391fe930, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/8996ba7ae2f74acc932b5f585b3e1533, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/ee61610e93864c2db2420e53d554ce04, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/effb1230864e4465b68a76cd4943407e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/32f18a1deb0341f98ac0505dd25311a6, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/533c1432a03f44dc866a76b869999c6d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/0681f1f346c84ac0aeb69eff63d926a7, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/2274906e86a147e391367d2a2f188dc0, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/bece1de248a94f7b90e30e270895f479, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/21cc62b924294969b7a8cf58f89d7cb1, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/f9b07389372548bebe77fcbde9e16fa3, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/79adc2759d924d82b371fdf86efe10ac] to archive 2024-11-20T22:26:14,148 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T22:26:14,150 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/e415b976194f44d4a4530135aad71ea5 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/e415b976194f44d4a4530135aad71ea5 2024-11-20T22:26:14,151 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/b69be3a705a6484282e8246ea4a2a2db to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/b69be3a705a6484282e8246ea4a2a2db 2024-11-20T22:26:14,153 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/073c661fff114780abbe32bcac8ba704 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/073c661fff114780abbe32bcac8ba704 2024-11-20T22:26:14,154 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/66a38c00149a4403b1c60f424b85d4ce to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/66a38c00149a4403b1c60f424b85d4ce 2024-11-20T22:26:14,155 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/61ffae69605040f5af0b3c671efc0a01 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/61ffae69605040f5af0b3c671efc0a01 2024-11-20T22:26:14,156 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/2efedca84ead4590b196b328e48a9514 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/2efedca84ead4590b196b328e48a9514 2024-11-20T22:26:14,157 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/ab09beb5378149ad85498c5f3e1bb601 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/ab09beb5378149ad85498c5f3e1bb601 2024-11-20T22:26:14,158 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/4b51c9ad66974c858cf016bc9ba05725 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/4b51c9ad66974c858cf016bc9ba05725 2024-11-20T22:26:14,159 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/dc7c621b0fe34484b61405bf5fe60dc1 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/dc7c621b0fe34484b61405bf5fe60dc1 2024-11-20T22:26:14,160 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/cec73974f8894716817cdf1b76ce9f92 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/cec73974f8894716817cdf1b76ce9f92 2024-11-20T22:26:14,161 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/38a4955662134fdeb6dead271c5e6718 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/38a4955662134fdeb6dead271c5e6718 2024-11-20T22:26:14,161 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/e8cd84c4defc4116bbc1382445c3e44a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/e8cd84c4defc4116bbc1382445c3e44a 2024-11-20T22:26:14,162 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/5e608102d6e84713809ac219f1a94123 to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/5e608102d6e84713809ac219f1a94123 2024-11-20T22:26:14,163 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/6f1d232380284b0390943aba391fe930 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/6f1d232380284b0390943aba391fe930 2024-11-20T22:26:14,164 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/8996ba7ae2f74acc932b5f585b3e1533 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/8996ba7ae2f74acc932b5f585b3e1533 2024-11-20T22:26:14,174 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/ee61610e93864c2db2420e53d554ce04 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/ee61610e93864c2db2420e53d554ce04 2024-11-20T22:26:14,176 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/effb1230864e4465b68a76cd4943407e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/effb1230864e4465b68a76cd4943407e 2024-11-20T22:26:14,177 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/32f18a1deb0341f98ac0505dd25311a6 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/32f18a1deb0341f98ac0505dd25311a6 2024-11-20T22:26:14,178 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/533c1432a03f44dc866a76b869999c6d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/533c1432a03f44dc866a76b869999c6d 2024-11-20T22:26:14,180 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/0681f1f346c84ac0aeb69eff63d926a7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/0681f1f346c84ac0aeb69eff63d926a7 2024-11-20T22:26:14,181 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/2274906e86a147e391367d2a2f188dc0 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/2274906e86a147e391367d2a2f188dc0 2024-11-20T22:26:14,182 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/bece1de248a94f7b90e30e270895f479 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/bece1de248a94f7b90e30e270895f479 2024-11-20T22:26:14,183 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/21cc62b924294969b7a8cf58f89d7cb1 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/21cc62b924294969b7a8cf58f89d7cb1 2024-11-20T22:26:14,184 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/f9b07389372548bebe77fcbde9e16fa3 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/f9b07389372548bebe77fcbde9e16fa3 2024-11-20T22:26:14,187 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/79adc2759d924d82b371fdf86efe10ac to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/79adc2759d924d82b371fdf86efe10ac 2024-11-20T22:26:14,188 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/f5683d5f9f8a4378997b29ea792f1e0f, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/4294785826d847dcb0d181f442a11fd7, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/bed795af1d874d728594d40154e0ed79, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/ef46413e76004a8bb9d6fdb383b366e6, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/8cb205eb99b74406a9c60b9349f251b4, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/f1364c7f7a0f4d618eac1aebc83700d8, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/e2dc43ee86d4475f8ef2f5466484cf73, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/f2676ea35aff4045b26fde3cbb73128e, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/9c9aa8ec01c14b979615280e6f5c2d05, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/0a6c07cc6f8f4022ae56b4daaf9aaff5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/6774f013e21b48d18a8dec9afcb11d91, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/c9b7246ebb5f4ffeac6930e47990bf93, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/b16db0f1de5f4168a3a04592deebcff7, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/96110421a6ca48ccb340cc252c8b94a3, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/e976953aabce4d1b8d9318622d1ccd22, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/5be380a7c9804f7b97c01e746aaf202d, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/ce3eba096fb64137a85ae1788a674277, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/a48de99023ff4f058835fb6cdd46e708, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/a7790fca00c441e995d66a279d454092, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/024824ca5b714084af649f14a2ca596a, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/b2a05953115e4d91b528d8cf974fb108, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/29d7b219cbff4d08aed5b9e6aaf7c30b, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/c48922ff989b49d984dce2e5905562a3, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/d8dacfce40aa4547a5c092002f1600a5, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/ec49a2dc5e6147a386d635225335cf85] to archive 2024-11-20T22:26:14,190 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T22:26:14,195 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/f5683d5f9f8a4378997b29ea792f1e0f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/f5683d5f9f8a4378997b29ea792f1e0f 2024-11-20T22:26:14,198 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/4294785826d847dcb0d181f442a11fd7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/4294785826d847dcb0d181f442a11fd7 2024-11-20T22:26:14,200 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/bed795af1d874d728594d40154e0ed79 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/bed795af1d874d728594d40154e0ed79 2024-11-20T22:26:14,203 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/ef46413e76004a8bb9d6fdb383b366e6 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/ef46413e76004a8bb9d6fdb383b366e6 2024-11-20T22:26:14,206 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/8cb205eb99b74406a9c60b9349f251b4 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/8cb205eb99b74406a9c60b9349f251b4 2024-11-20T22:26:14,207 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/f1364c7f7a0f4d618eac1aebc83700d8 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/f1364c7f7a0f4d618eac1aebc83700d8 2024-11-20T22:26:14,210 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/e2dc43ee86d4475f8ef2f5466484cf73 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/e2dc43ee86d4475f8ef2f5466484cf73 2024-11-20T22:26:14,225 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/f2676ea35aff4045b26fde3cbb73128e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/f2676ea35aff4045b26fde3cbb73128e 2024-11-20T22:26:14,231 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/9c9aa8ec01c14b979615280e6f5c2d05 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/9c9aa8ec01c14b979615280e6f5c2d05 2024-11-20T22:26:14,233 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/0a6c07cc6f8f4022ae56b4daaf9aaff5 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/0a6c07cc6f8f4022ae56b4daaf9aaff5 2024-11-20T22:26:14,235 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/6774f013e21b48d18a8dec9afcb11d91 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/6774f013e21b48d18a8dec9afcb11d91 2024-11-20T22:26:14,237 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/c9b7246ebb5f4ffeac6930e47990bf93 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/c9b7246ebb5f4ffeac6930e47990bf93 2024-11-20T22:26:14,240 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/b16db0f1de5f4168a3a04592deebcff7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/b16db0f1de5f4168a3a04592deebcff7 2024-11-20T22:26:14,242 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/96110421a6ca48ccb340cc252c8b94a3 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/96110421a6ca48ccb340cc252c8b94a3 2024-11-20T22:26:14,244 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/e976953aabce4d1b8d9318622d1ccd22 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/e976953aabce4d1b8d9318622d1ccd22 2024-11-20T22:26:14,246 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/5be380a7c9804f7b97c01e746aaf202d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/5be380a7c9804f7b97c01e746aaf202d 2024-11-20T22:26:14,247 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/ce3eba096fb64137a85ae1788a674277 to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/ce3eba096fb64137a85ae1788a674277 2024-11-20T22:26:14,249 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/a48de99023ff4f058835fb6cdd46e708 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/a48de99023ff4f058835fb6cdd46e708 2024-11-20T22:26:14,250 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/a7790fca00c441e995d66a279d454092 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/a7790fca00c441e995d66a279d454092 2024-11-20T22:26:14,252 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/024824ca5b714084af649f14a2ca596a to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/024824ca5b714084af649f14a2ca596a 2024-11-20T22:26:14,254 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/b2a05953115e4d91b528d8cf974fb108 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/b2a05953115e4d91b528d8cf974fb108 2024-11-20T22:26:14,264 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/29d7b219cbff4d08aed5b9e6aaf7c30b to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/29d7b219cbff4d08aed5b9e6aaf7c30b 2024-11-20T22:26:14,270 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/c48922ff989b49d984dce2e5905562a3 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/c48922ff989b49d984dce2e5905562a3 2024-11-20T22:26:14,274 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/d8dacfce40aa4547a5c092002f1600a5 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/d8dacfce40aa4547a5c092002f1600a5 2024-11-20T22:26:14,276 DEBUG [StoreCloser-TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/ec49a2dc5e6147a386d635225335cf85 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/ec49a2dc5e6147a386d635225335cf85 2024-11-20T22:26:14,281 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/recovered.edits/383.seqid, newMaxSeqId=383, maxSeqId=4 2024-11-20T22:26:14,283 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e. 2024-11-20T22:26:14,283 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] regionserver.HRegion(1635): Region close journal for bacf880704606b04149d74a04b9ceb0e: 2024-11-20T22:26:14,284 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=176}] handler.UnassignRegionHandler(170): Closed bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,285 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=175 updating hbase:meta row=bacf880704606b04149d74a04b9ceb0e, regionState=CLOSED 2024-11-20T22:26:14,287 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-11-20T22:26:14,287 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; CloseRegionProcedure bacf880704606b04149d74a04b9ceb0e, server=6365a1e51efd,44631,1732141399950 in 916 msec 2024-11-20T22:26:14,292 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-11-20T22:26:14,292 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 932 msec 2024-11-20T22:26:14,293 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141574293"}]},"ts":"1732141574293"} 2024-11-20T22:26:14,290 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=175, resume processing ppid=174 2024-11-20T22:26:14,294 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, ppid=174, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=bacf880704606b04149d74a04b9ceb0e, UNASSIGN in 926 msec 2024-11-20T22:26:14,294 
INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T22:26:14,341 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T22:26:14,343 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.0150 sec 2024-11-20T22:26:14,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T22:26:14,433 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-11-20T22:26:14,433 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T22:26:14,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:26:14,434 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=177, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:26:14,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T22:26:14,435 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=177, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:26:14,436 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,438 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A, FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B, FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C, FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/recovered.edits] 2024-11-20T22:26:14,440 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/2773da85e75c49f89b9e059d9dedb997 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/2773da85e75c49f89b9e059d9dedb997 2024-11-20T22:26:14,441 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/2a609136f62b4c4c99381323e5e5b827 to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/2a609136f62b4c4c99381323e5e5b827 2024-11-20T22:26:14,441 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/838a3add16b140f998b181113b374c7f to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/A/838a3add16b140f998b181113b374c7f 2024-11-20T22:26:14,443 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/37b9cfe5654048febef4ae93f496693d to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/37b9cfe5654048febef4ae93f496693d 2024-11-20T22:26:14,444 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/39577715e5164c09bcee382a1dda0fb7 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/39577715e5164c09bcee382a1dda0fb7 2024-11-20T22:26:14,445 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/98b73669317a4c0d9d5f19cbd079df06 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/B/98b73669317a4c0d9d5f19cbd079df06 2024-11-20T22:26:14,447 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/317e27ddfaa74e2680954edcfd0200a4 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/317e27ddfaa74e2680954edcfd0200a4 2024-11-20T22:26:14,448 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/84718d960c3144bb88da006701a271b4 to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/84718d960c3144bb88da006701a271b4 2024-11-20T22:26:14,449 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/958a5003515a426d89af45e8142b240f to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/C/958a5003515a426d89af45e8142b240f 2024-11-20T22:26:14,451 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/recovered.edits/383.seqid to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e/recovered.edits/383.seqid 2024-11-20T22:26:14,451 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/default/TestAcidGuarantees/bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,451 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T22:26:14,451 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T22:26:14,452 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T22:26:14,454 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200371963a5a87453ab085f2fc5e0dfda9_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200371963a5a87453ab085f2fc5e0dfda9_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,455 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201642dfa70c8f4ed79d360d158327ad0c_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201642dfa70c8f4ed79d360d158327ad0c_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,456 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201a39e4380bd849ac9625a8e1a21b64a8_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201a39e4380bd849ac9625a8e1a21b64a8_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,457 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201c7b6a6d2faf443daac6e48fcae9ccf1_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201c7b6a6d2faf443daac6e48fcae9ccf1_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,458 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201ce227a7661c4921b62cb3a72cffb989_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201ce227a7661c4921b62cb3a72cffb989_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,459 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201e5b08d783904ce9849042672e273f46_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201e5b08d783904ce9849042672e273f46_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,459 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112020e47ffbb2d143a9931dfb168adda517_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112020e47ffbb2d143a9931dfb168adda517_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,461 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202cdf2add2fd14dd7a9d02e3796a24812_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202cdf2add2fd14dd7a9d02e3796a24812_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,461 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202fb9e3c6d4c740b2b88102da21f79373_bacf880704606b04149d74a04b9ceb0e to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202fb9e3c6d4c740b2b88102da21f79373_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,462 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204733b7fa31f44f6daaf847d3e0daeac0_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204733b7fa31f44f6daaf847d3e0daeac0_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,463 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112074f983ce7dcc4047bcd1cee0c05e3162_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112074f983ce7dcc4047bcd1cee0c05e3162_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,464 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120840228c8fcd04a4ab77644b621491ad5_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120840228c8fcd04a4ab77644b621491ad5_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,464 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112091e1e438bf8740b6a8b24a3eca43799e_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112091e1e438bf8740b6a8b24a3eca43799e_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,465 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a1bda10fdb7c4331b1919ca2e075273b_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a1bda10fdb7c4331b1919ca2e075273b_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,466 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120aea08877219540e0abb5c286f1b4b346_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120aea08877219540e0abb5c286f1b4b346_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,467 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b371495a314348e3a96e6c4237549db2_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b371495a314348e3a96e6c4237549db2_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,467 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b45c751d13af4afc95bbd31f386098ef_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b45c751d13af4afc95bbd31f386098ef_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,468 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d960475949234fe28285d668748bd115_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d960475949234fe28285d668748bd115_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,469 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e89d5dc0f855460ab28ce89654dba2e3_bacf880704606b04149d74a04b9ceb0e to hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e89d5dc0f855460ab28ce89654dba2e3_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,470 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f2921496ab764c31a9bfe0e2a7b00db1_bacf880704606b04149d74a04b9ceb0e to 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f2921496ab764c31a9bfe0e2a7b00db1_bacf880704606b04149d74a04b9ceb0e 2024-11-20T22:26:14,470 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T22:26:14,473 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=177, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:26:14,475 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T22:26:14,476 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T22:26:14,477 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=177, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:26:14,477 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T22:26:14,477 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732141574477"}]},"ts":"9223372036854775807"} 2024-11-20T22:26:14,488 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T22:26:14,488 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => bacf880704606b04149d74a04b9ceb0e, NAME => 'TestAcidGuarantees,,1732141549862.bacf880704606b04149d74a04b9ceb0e.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T22:26:14,488 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T22:26:14,489 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732141574488"}]},"ts":"9223372036854775807"} 2024-11-20T22:26:14,490 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T22:26:14,500 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=177, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:26:14,501 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 67 msec 2024-11-20T22:26:14,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41349 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T22:26:14,536 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 177 completed 2024-11-20T22:26:14,548 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=241 (was 237) - Thread LEAK? -, OpenFileDescriptor=459 (was 451) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1102 (was 1077) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=720 (was 1551) 2024-11-20T22:26:14,548 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-20T22:26:14,548 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T22:26:14,548 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a3c3fb3 to 127.0.0.1:51822 2024-11-20T22:26:14,548 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:14,549 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T22:26:14,549 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1416742036, stopped=false 2024-11-20T22:26:14,549 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=6365a1e51efd,41349,1732141399102 2024-11-20T22:26:14,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T22:26:14,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:26:14,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44631-0x1015ba1db0a0001, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T22:26:14,558 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-20T22:26:14,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44631-0x1015ba1db0a0001, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:26:14,559 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T22:26:14,559 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44631-0x1015ba1db0a0001, quorum=127.0.0.1:51822, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T22:26:14,560 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:14,560 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '6365a1e51efd,44631,1732141399950' ***** 2024-11-20T22:26:14,560 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-20T22:26:14,560 INFO [RS:0;6365a1e51efd:44631 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T22:26:14,561 INFO [RS:0;6365a1e51efd:44631 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T22:26:14,561 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-20T22:26:14,561 INFO [RS:0;6365a1e51efd:44631 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
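The entries above show the master completing DisableTableProcedure (pid=173) and DeleteTableProcedure (pid=177) for TestAcidGuarantees before the harness begins tearing the minicluster down. For reference, below is a minimal sketch of driving the same disable/delete through the public HBase Admin API; it is an illustration against the standard client, not the test's own code, and the class name and configuration setup are made up for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Mirrors the DISABLE step (DisableTableProcedure in the log);
      // the blocking call returns once the master-side procedure completes.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
      // Mirrors the DELETE step (DeleteTableProcedure): region data is
      // archived, META rows are removed, and the table descriptor is dropped.
      admin.deleteTable(table);
    }
  }
}

The log's "HBaseAdmin$TableFuture ... completed" lines correspond to the client waiting on these master procedures; the blocking Admin calls shown here hide that future behind a synchronous API.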
2024-11-20T22:26:14,561 INFO [RS:0;6365a1e51efd:44631 {}] regionserver.HRegionServer(3579): Received CLOSE for 451355caa9251e00fdfd2d0a5e7a8871 2024-11-20T22:26:14,561 INFO [RS:0;6365a1e51efd:44631 {}] regionserver.HRegionServer(1224): stopping server 6365a1e51efd,44631,1732141399950 2024-11-20T22:26:14,561 DEBUG [RS:0;6365a1e51efd:44631 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:14,561 INFO [RS:0;6365a1e51efd:44631 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T22:26:14,561 INFO [RS:0;6365a1e51efd:44631 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T22:26:14,561 INFO [RS:0;6365a1e51efd:44631 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T22:26:14,561 INFO [RS:0;6365a1e51efd:44631 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-20T22:26:14,562 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 451355caa9251e00fdfd2d0a5e7a8871, disabling compactions & flushes 2024-11-20T22:26:14,562 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732141403356.451355caa9251e00fdfd2d0a5e7a8871. 2024-11-20T22:26:14,562 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732141403356.451355caa9251e00fdfd2d0a5e7a8871. 2024-11-20T22:26:14,562 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732141403356.451355caa9251e00fdfd2d0a5e7a8871. after waiting 0 ms 2024-11-20T22:26:14,562 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732141403356.451355caa9251e00fdfd2d0a5e7a8871. 
2024-11-20T22:26:14,562 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 451355caa9251e00fdfd2d0a5e7a8871 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-20T22:26:14,562 INFO [RS:0;6365a1e51efd:44631 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-20T22:26:14,562 DEBUG [RS:0;6365a1e51efd:44631 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 451355caa9251e00fdfd2d0a5e7a8871=hbase:namespace,,1732141403356.451355caa9251e00fdfd2d0a5e7a8871.} 2024-11-20T22:26:14,563 DEBUG [RS:0;6365a1e51efd:44631 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 451355caa9251e00fdfd2d0a5e7a8871 2024-11-20T22:26:14,563 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-20T22:26:14,563 INFO [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-20T22:26:14,563 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-20T22:26:14,563 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T22:26:14,563 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T22:26:14,563 INFO [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-20T22:26:14,586 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/namespace/451355caa9251e00fdfd2d0a5e7a8871/.tmp/info/9acccc81b0584cfd98e27fa4fe8e81f6 is 45, key is default/info:d/1732141404718/Put/seqid=0 2024-11-20T22:26:14,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742509_1685 (size=5037) 2024-11-20T22:26:14,621 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/meta/1588230740/.tmp/info/37ee1c4595a84d34aa59e1f6a0d9f2b4 is 143, key is hbase:namespace,,1732141403356.451355caa9251e00fdfd2d0a5e7a8871./info:regioninfo/1732141404623/Put/seqid=0 2024-11-20T22:26:14,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742510_1686 (size=7725) 2024-11-20T22:26:14,645 INFO [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/meta/1588230740/.tmp/info/37ee1c4595a84d34aa59e1f6a0d9f2b4 2024-11-20T22:26:14,647 INFO [regionserver/6365a1e51efd:0.leaseChecker {}] 
regionserver.LeaseManager(133): Closed leases 2024-11-20T22:26:14,685 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/meta/1588230740/.tmp/rep_barrier/9033ee908fe44348b3597a704452c92a is 102, key is TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb./rep_barrier:/1732141431341/DeleteFamily/seqid=0 2024-11-20T22:26:14,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742511_1687 (size=6025) 2024-11-20T22:26:14,718 INFO [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/meta/1588230740/.tmp/rep_barrier/9033ee908fe44348b3597a704452c92a 2024-11-20T22:26:14,749 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/meta/1588230740/.tmp/table/596a01dc0b1d42368181fae740ebab86 is 96, key is TestAcidGuarantees,,1732141405072.36a256a4871d36dc6632cf0cdb971cbb./table:/1732141431341/DeleteFamily/seqid=0 2024-11-20T22:26:14,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742512_1688 (size=5942) 2024-11-20T22:26:14,757 INFO [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/meta/1588230740/.tmp/table/596a01dc0b1d42368181fae740ebab86 2024-11-20T22:26:14,762 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/meta/1588230740/.tmp/info/37ee1c4595a84d34aa59e1f6a0d9f2b4 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/meta/1588230740/info/37ee1c4595a84d34aa59e1f6a0d9f2b4 2024-11-20T22:26:14,766 DEBUG [RS:0;6365a1e51efd:44631 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 451355caa9251e00fdfd2d0a5e7a8871 2024-11-20T22:26:14,768 INFO [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/meta/1588230740/info/37ee1c4595a84d34aa59e1f6a0d9f2b4, entries=22, sequenceid=93, filesize=7.5 K 2024-11-20T22:26:14,769 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/meta/1588230740/.tmp/rep_barrier/9033ee908fe44348b3597a704452c92a as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/meta/1588230740/rep_barrier/9033ee908fe44348b3597a704452c92a 2024-11-20T22:26:14,772 INFO [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/meta/1588230740/rep_barrier/9033ee908fe44348b3597a704452c92a, entries=6, sequenceid=93, filesize=5.9 K 2024-11-20T22:26:14,773 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/meta/1588230740/.tmp/table/596a01dc0b1d42368181fae740ebab86 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/meta/1588230740/table/596a01dc0b1d42368181fae740ebab86 2024-11-20T22:26:14,776 INFO [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/meta/1588230740/table/596a01dc0b1d42368181fae740ebab86, entries=9, sequenceid=93, filesize=5.8 K 2024-11-20T22:26:14,779 INFO [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 215ms, sequenceid=93, compaction requested=false 2024-11-20T22:26:14,786 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-20T22:26:14,786 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T22:26:14,786 INFO [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-20T22:26:14,786 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-20T22:26:14,786 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T22:26:14,967 DEBUG [RS:0;6365a1e51efd:44631 {}] regionserver.HRegionServer(1629): Waiting on 451355caa9251e00fdfd2d0a5e7a8871 2024-11-20T22:26:14,968 INFO [regionserver/6365a1e51efd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T22:26:14,968 INFO [regionserver/6365a1e51efd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T22:26:15,005 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/namespace/451355caa9251e00fdfd2d0a5e7a8871/.tmp/info/9acccc81b0584cfd98e27fa4fe8e81f6 2024-11-20T22:26:15,008 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/namespace/451355caa9251e00fdfd2d0a5e7a8871/.tmp/info/9acccc81b0584cfd98e27fa4fe8e81f6 as 
hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/namespace/451355caa9251e00fdfd2d0a5e7a8871/info/9acccc81b0584cfd98e27fa4fe8e81f6 2024-11-20T22:26:15,011 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/namespace/451355caa9251e00fdfd2d0a5e7a8871/info/9acccc81b0584cfd98e27fa4fe8e81f6, entries=2, sequenceid=6, filesize=4.9 K 2024-11-20T22:26:15,012 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 451355caa9251e00fdfd2d0a5e7a8871 in 450ms, sequenceid=6, compaction requested=false 2024-11-20T22:26:15,015 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/data/hbase/namespace/451355caa9251e00fdfd2d0a5e7a8871/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-20T22:26:15,016 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1732141403356.451355caa9251e00fdfd2d0a5e7a8871. 2024-11-20T22:26:15,016 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 451355caa9251e00fdfd2d0a5e7a8871: 2024-11-20T22:26:15,016 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1732141403356.451355caa9251e00fdfd2d0a5e7a8871. 2024-11-20T22:26:15,167 INFO [RS:0;6365a1e51efd:44631 {}] regionserver.HRegionServer(1250): stopping server 6365a1e51efd,44631,1732141399950; all regions closed. 
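The flush entries above follow a consistent pattern: the memstore is written to a file under the region's .tmp directory (DefaultStoreFlusher), then HRegionFileSystem "commits" it by renaming it into the store directory, and only then is it added to the store. The sketch below illustrates that write-then-rename idea with the generic Hadoop FileSystem API; the paths are hypothetical and a plain text file stands in for the real HFile writer.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical layout; real flushes write region/.tmp/<family>/<uuid>.
    Path tmp = new Path("target/demo/region/.tmp/info/flush-0001");
    Path dst = new Path("target/demo/region/info/flush-0001");
    try (FSDataOutputStream out = fs.create(tmp)) {
      out.writeBytes("cell data would be written as an HFile here");
    }
    fs.mkdirs(dst.getParent());
    // The "commit": the rename makes the file visible in the store directory
    // only after it has been completely written, so readers never see a
    // half-written file.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("commit failed for " + tmp);
    }
  }
}

The same temp-then-rename discipline is why the archiver and flusher lines always reference fully formed files: partially written output never appears under the live store path.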
2024-11-20T22:26:15,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741834_1010 (size=26050) 2024-11-20T22:26:15,174 DEBUG [RS:0;6365a1e51efd:44631 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/oldWALs 2024-11-20T22:26:15,174 INFO [RS:0;6365a1e51efd:44631 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 6365a1e51efd%2C44631%2C1732141399950.meta:.meta(num 1732141403064) 2024-11-20T22:26:15,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741832_1008 (size=15555285) 2024-11-20T22:26:15,179 DEBUG [RS:0;6365a1e51efd:44631 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/oldWALs 2024-11-20T22:26:15,179 INFO [RS:0;6365a1e51efd:44631 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 6365a1e51efd%2C44631%2C1732141399950:(num 1732141402127) 2024-11-20T22:26:15,179 DEBUG [RS:0;6365a1e51efd:44631 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:15,179 INFO [RS:0;6365a1e51efd:44631 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T22:26:15,180 INFO [RS:0;6365a1e51efd:44631 {}] hbase.ChoreService(370): Chore service for: regionserver/6365a1e51efd:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-11-20T22:26:15,180 INFO [regionserver/6365a1e51efd:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-20T22:26:15,181 INFO [RS:0;6365a1e51efd:44631 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:44631 2024-11-20T22:26:15,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44631-0x1015ba1db0a0001, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6365a1e51efd,44631,1732141399950 2024-11-20T22:26:15,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T22:26:15,200 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6365a1e51efd,44631,1732141399950] 2024-11-20T22:26:15,200 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 6365a1e51efd,44631,1732141399950; numProcessing=1 2024-11-20T22:26:15,216 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/6365a1e51efd,44631,1732141399950 already deleted, retry=false 2024-11-20T22:26:15,216 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 6365a1e51efd,44631,1732141399950 expired; onlineServers=0 2024-11-20T22:26:15,216 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '6365a1e51efd,41349,1732141399102' ***** 2024-11-20T22:26:15,216 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T22:26:15,216 DEBUG [M:0;6365a1e51efd:41349 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b50fbf3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6365a1e51efd/172.17.0.2:0 2024-11-20T22:26:15,217 INFO [M:0;6365a1e51efd:41349 {}] regionserver.HRegionServer(1224): stopping server 6365a1e51efd,41349,1732141399102 2024-11-20T22:26:15,217 INFO [M:0;6365a1e51efd:41349 {}] regionserver.HRegionServer(1250): stopping server 6365a1e51efd,41349,1732141399102; all regions closed. 2024-11-20T22:26:15,217 DEBUG [M:0;6365a1e51efd:41349 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:15,217 DEBUG [M:0;6365a1e51efd:41349 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T22:26:15,217 DEBUG [M:0;6365a1e51efd:41349 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T22:26:15,217 DEBUG [master/6365a1e51efd:0:becomeActiveMaster-HFileCleaner.small.0-1732141401796 {}] cleaner.HFileCleaner(306): Exit Thread[master/6365a1e51efd:0:becomeActiveMaster-HFileCleaner.small.0-1732141401796,5,FailOnTimeoutGroup] 2024-11-20T22:26:15,217 DEBUG [master/6365a1e51efd:0:becomeActiveMaster-HFileCleaner.large.0-1732141401795 {}] cleaner.HFileCleaner(306): Exit Thread[master/6365a1e51efd:0:becomeActiveMaster-HFileCleaner.large.0-1732141401795,5,FailOnTimeoutGroup] 2024-11-20T22:26:15,217 INFO [M:0;6365a1e51efd:41349 {}] hbase.ChoreService(370): Chore service for: master/6365a1e51efd:0 had [] on shutdown 2024-11-20T22:26:15,218 DEBUG [M:0;6365a1e51efd:41349 {}] master.HMaster(1733): Stopping service threads 2024-11-20T22:26:15,218 INFO [M:0;6365a1e51efd:41349 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T22:26:15,218 ERROR [M:0;6365a1e51efd:41349 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-5,5,PEWorkerGroup] Thread[IPC Client (1573750593) connection to localhost/127.0.0.1:41121 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:41121,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-11-20T22:26:15,218 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-20T22:26:15,219 INFO [M:0;6365a1e51efd:41349 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T22:26:15,219 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
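The ERROR above is the ProcedureExecutor noticing live threads still attached to its PEWorkerGroup at shutdown and dumping them, much like the earlier ResourceChecker "Thread LEAK?" line. A minimal, purely illustrative way to produce such a listing with the standard ThreadGroup API is sketched below; this is not HBase's implementation, just the general technique.

public class ThreadGroupDumpSketch {
  public static void main(String[] args) {
    // Enumerate live threads in a group, similar in spirit to the
    // PEWorkerGroup listing printed by the ProcedureExecutor on shutdown.
    ThreadGroup group = Thread.currentThread().getThreadGroup();
    Thread[] threads = new Thread[group.activeCount() + 8];
    int n = group.enumerate(threads, true);
    for (int i = 0; i < n; i++) {
      // Thread.toString() prints "Thread[name,priority,group]", the same
      // shape as the entries in the log's thread dump.
      System.out.println(threads[i] + " daemon=" + threads[i].isDaemon());
    }
  }
}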
2024-11-20T22:26:15,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T22:26:15,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:26:15,225 DEBUG [M:0;6365a1e51efd:41349 {}] zookeeper.ZKUtil(347): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T22:26:15,225 WARN [M:0;6365a1e51efd:41349 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T22:26:15,225 INFO [M:0;6365a1e51efd:41349 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-11-20T22:26:15,225 INFO [M:0;6365a1e51efd:41349 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T22:26:15,225 DEBUG [M:0;6365a1e51efd:41349 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T22:26:15,225 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T22:26:15,225 INFO [M:0;6365a1e51efd:41349 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T22:26:15,225 DEBUG [M:0;6365a1e51efd:41349 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T22:26:15,225 DEBUG [M:0;6365a1e51efd:41349 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T22:26:15,225 DEBUG [M:0;6365a1e51efd:41349 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
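The ZKWatcher entries around this point show NodeDeleted events for /hbase/running and /hbase/master being delivered to the master and region server watchers, plus the "Set watcher on znode that does not yet exist" re-registration. Below is a minimal sketch of the underlying mechanism with the plain ZooKeeper client; the connect string matches the quorum format seen in the log but is illustrative, as is the class name.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkWatchSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:51822", 30000, event -> {});
    Watcher watcher = (WatchedEvent event) -> {
      // Fires once per registered watch, e.g. NodeDeleted when /hbase/master
      // is removed during shutdown.
      System.out.println("event " + event.getType() + " on " + event.getPath());
    };
    // exists() registers a watch even if the znode is absent, which is how a
    // watcher can be set "on znode that does not yet exist".
    zk.exists("/hbase/master", watcher);
    Thread.sleep(60_000); // keep the session alive long enough to see events
    zk.close();
  }
}

Because ZooKeeper watches are one-shot, components like ZKWatcher re-register after every delivered event, which is why the log shows repeated "Set watcher" lines.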
2024-11-20T22:26:15,225 INFO [M:0;6365a1e51efd:41349 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=782.97 KB heapSize=963.76 KB 2024-11-20T22:26:15,267 DEBUG [M:0;6365a1e51efd:41349 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0fc5ffe24ac7478e9a63f2f81ba9c0c7 is 82, key is hbase:meta,,1/info:regioninfo/1732141403193/Put/seqid=0 2024-11-20T22:26:15,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742513_1689 (size=5672) 2024-11-20T22:26:15,287 INFO [M:0;6365a1e51efd:41349 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2234 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0fc5ffe24ac7478e9a63f2f81ba9c0c7 2024-11-20T22:26:15,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44631-0x1015ba1db0a0001, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T22:26:15,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44631-0x1015ba1db0a0001, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T22:26:15,309 INFO [RS:0;6365a1e51efd:44631 {}] regionserver.HRegionServer(1307): Exiting; stopping=6365a1e51efd,44631,1732141399950; zookeeper connection closed. 2024-11-20T22:26:15,309 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1d7c4238 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1d7c4238 2024-11-20T22:26:15,311 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T22:26:15,340 DEBUG [M:0;6365a1e51efd:41349 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8976e89f1019458088f61c838882415d is 2277, key is \x00\x00\x00\x00\x00\x00\x00*/proc:d/1732141433590/Put/seqid=0 2024-11-20T22:26:15,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742514_1690 (size=44672) 2024-11-20T22:26:15,399 INFO [M:0;6365a1e51efd:41349 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=782.41 KB at sequenceid=2234 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8976e89f1019458088f61c838882415d 2024-11-20T22:26:15,417 INFO [M:0;6365a1e51efd:41349 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8976e89f1019458088f61c838882415d 2024-11-20T22:26:15,472 DEBUG [M:0;6365a1e51efd:41349 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/862cf26833d7473f8673df41db54912f is 69, key is 
6365a1e51efd,44631,1732141399950/rs:state/1732141401873/Put/seqid=0 2024-11-20T22:26:15,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073742515_1691 (size=5156) 2024-11-20T22:26:15,910 INFO [M:0;6365a1e51efd:41349 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2234 (bloomFilter=true), to=hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/862cf26833d7473f8673df41db54912f 2024-11-20T22:26:15,913 DEBUG [M:0;6365a1e51efd:41349 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0fc5ffe24ac7478e9a63f2f81ba9c0c7 as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0fc5ffe24ac7478e9a63f2f81ba9c0c7 2024-11-20T22:26:15,915 INFO [M:0;6365a1e51efd:41349 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0fc5ffe24ac7478e9a63f2f81ba9c0c7, entries=8, sequenceid=2234, filesize=5.5 K 2024-11-20T22:26:15,916 DEBUG [M:0;6365a1e51efd:41349 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8976e89f1019458088f61c838882415d as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8976e89f1019458088f61c838882415d 2024-11-20T22:26:15,918 INFO [M:0;6365a1e51efd:41349 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8976e89f1019458088f61c838882415d 2024-11-20T22:26:15,918 INFO [M:0;6365a1e51efd:41349 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8976e89f1019458088f61c838882415d, entries=177, sequenceid=2234, filesize=43.6 K 2024-11-20T22:26:15,919 DEBUG [M:0;6365a1e51efd:41349 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/862cf26833d7473f8673df41db54912f as hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/862cf26833d7473f8673df41db54912f 2024-11-20T22:26:15,921 INFO [M:0;6365a1e51efd:41349 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41121/user/jenkins/test-data/82325a6b-eed6-7428-4550-4638207c523a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/862cf26833d7473f8673df41db54912f, entries=1, sequenceid=2234, filesize=5.0 K 2024-11-20T22:26:15,922 INFO [M:0;6365a1e51efd:41349 {}] regionserver.HRegion(3040): Finished flush of dataSize ~782.97 KB/801759, heapSize ~963.46 KB/986584, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 697ms, sequenceid=2234, compaction requested=false 2024-11-20T22:26:15,923 INFO [M:0;6365a1e51efd:41349 {}] 
regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T22:26:15,923 DEBUG [M:0;6365a1e51efd:41349 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-20T22:26:15,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34119 is added to blk_1073741830_1006 (size=948200) 2024-11-20T22:26:15,927 INFO [M:0;6365a1e51efd:41349 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-11-20T22:26:15,927 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-20T22:26:15,927 INFO [M:0;6365a1e51efd:41349 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41349 2024-11-20T22:26:15,985 DEBUG [M:0;6365a1e51efd:41349 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/6365a1e51efd,41349,1732141399102 already deleted, retry=false 2024-11-20T22:26:16,091 INFO [M:0;6365a1e51efd:41349 {}] regionserver.HRegionServer(1307): Exiting; stopping=6365a1e51efd,41349,1732141399102; zookeeper connection closed. 2024-11-20T22:26:16,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T22:26:16,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41349-0x1015ba1db0a0000, quorum=127.0.0.1:51822, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T22:26:16,100 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bd2e890{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T22:26:16,103 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2d3fa6ef{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T22:26:16,103 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T22:26:16,103 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@63d4d645{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T22:26:16,104 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57582772{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/hadoop.log.dir/,STOPPED} 2024-11-20T22:26:16,108 WARN [BP-1425926944-172.17.0.2-1732141394839 heartbeating to localhost/127.0.0.1:41121 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T22:26:16,108 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T22:26:16,109 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T22:26:16,109 WARN [BP-1425926944-172.17.0.2-1732141394839 heartbeating to localhost/127.0.0.1:41121 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1425926944-172.17.0.2-1732141394839 (Datanode Uuid add97091-1a74-4fba-98fe-43eb274f7e88) service to localhost/127.0.0.1:41121 2024-11-20T22:26:16,113 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/cluster_4bf5016f-69f0-b7cd-6e5c-cf5b66ac688f/dfs/data/data1/current/BP-1425926944-172.17.0.2-1732141394839 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T22:26:16,113 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/cluster_4bf5016f-69f0-b7cd-6e5c-cf5b66ac688f/dfs/data/data2/current/BP-1425926944-172.17.0.2-1732141394839 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T22:26:16,115 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T22:26:16,124 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f0d4558{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T22:26:16,125 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a299586{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T22:26:16,125 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T22:26:16,125 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@588be694{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T22:26:16,125 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73882ca4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ba09e604-fd1d-c554-af1c-b4778850be9d/hadoop.log.dir/,STOPPED} 2024-11-20T22:26:16,147 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-11-20T22:26:16,364 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
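The final lines show HBaseTestingUtility shutting down the in-process HDFS, ZooKeeper, and HBase daemons until "Minicluster is down". For context, a minimal sketch of the surrounding test lifecycle is shown below; exact method signatures vary between HBase versions, and the table setup is a placeholder rather than the actual TestAcidGuarantees fixture.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();           // brings up ZK, HDFS and HBase in-process
    try {
      TableName name = TableName.valueOf("TestAcidGuarantees");
      Table table = util.createTable(name, Bytes.toBytes("A"));
      // ... test body would exercise the table here ...
      table.close();
      util.deleteTable(name);          // disable + delete, as in the log above
    } finally {
      util.shutdownMiniCluster();      // emits the "Minicluster is down" message
    }
  }
}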